repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 16,508
|
Projects/NUCLEO-WL55JC/Templates_LL/SingleCore/MDK-ARM/startup_stm32wl55xx_cm4.s
|
;********************************************************************************
;* File Name : startup_stm32wl55xx_cm4.s
;* Author : MCD Application Team
;* Description : STM32WL55xx devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM4 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;********************************************************************************
;* @attention
;*
;* Copyright (c) 2020 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file
;* in the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;*
;********************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; 1 KB main stack. The stack grows downward; __initial_sp labels the first
; address past Stack_Mem and is loaded into SP from vector-table entry 0.
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3 ; uninitialized RAM, 2^3 = 8-byte aligned
Stack_Mem SPACE Stack_Size
__initial_sp ; top of stack
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; 512 B heap. __heap_base/__heap_limit bound the region; they are exported
; to the C library (MicroLIB) or used by __user_initial_stackheap below.
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3 ; uninitialized RAM, 8-byte aligned
__heap_base ; first byte of heap
Heap_Mem SPACE Heap_Size
__heap_limit ; first byte past the heap
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Entries 0-15 are the Cortex-M4 core exceptions, followed by the 62
; STM32WL55xx CM4 external interrupt vectors. Entry order is fixed by the
; NVIC position number in hardware -- never reorder or remove an entry.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window WatchDog
DCD PVD_PVM_IRQHandler ; PVD and PVM detector
DCD TAMP_STAMP_LSECSS_SSRU_IRQHandler ; RTC Tamper, RTC TimeStamp, LSECSS and RTC SSR Underflow Interrupts
DCD RTC_WKUP_IRQHandler ; RTC Wakeup Interrupt
DCD FLASH_IRQHandler ; FLASH global Interrupt
DCD RCC_IRQHandler ; RCC Interrupt
DCD EXTI0_IRQHandler ; EXTI Line 0 Interrupt
DCD EXTI1_IRQHandler ; EXTI Line 1 Interrupt
DCD EXTI2_IRQHandler ; EXTI Line 2 Interrupt
DCD EXTI3_IRQHandler ; EXTI Line 3 Interrupt
DCD EXTI4_IRQHandler ; EXTI Line 4 Interrupt
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 Interrupt
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 Interrupt
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 Interrupt
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 Interrupt
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 Interrupt
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 Interrupt
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 Interrupt
DCD ADC_IRQHandler ; ADC Interrupt
DCD DAC_IRQHandler ; DAC Interrupt
DCD C2SEV_PWR_C2H_IRQHandler ; CPU M0+ SEV and PWR CPU M0+ HOLD wakeup Interrupt
DCD COMP_IRQHandler ; COMP1 and COMP2 Interrupts
DCD EXTI9_5_IRQHandler ; EXTI Lines [9:5] Interrupt
DCD TIM1_BRK_IRQHandler ; TIM1 Break Interrupt
DCD TIM1_UP_IRQHandler ; TIM1 Update Interrupts
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Communication Interrupts
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare Interrupt
DCD TIM2_IRQHandler ; TIM2 Global Interrupt
DCD TIM16_IRQHandler ; TIM16 Global Interrupt
DCD TIM17_IRQHandler ; TIM17 Global Interrupt
DCD I2C1_EV_IRQHandler ; I2C1 Event Interrupt
DCD I2C1_ER_IRQHandler ; I2C1 Error Interrupt
DCD I2C2_EV_IRQHandler ; I2C2 Event Interrupt
DCD I2C2_ER_IRQHandler ; I2C2 Error Interrupt
DCD SPI1_IRQHandler ; SPI1 Interrupt
DCD SPI2_IRQHandler ; SPI2 Interrupt
DCD USART1_IRQHandler ; USART1 Interrupt
DCD USART2_IRQHandler ; USART2 Interrupt
DCD LPUART1_IRQHandler ; LPUART1 Interrupt
DCD LPTIM1_IRQHandler ; LPTIM1 Interrupt
DCD LPTIM2_IRQHandler ; LPTIM2 Interrupt
DCD EXTI15_10_IRQHandler ; EXTI Lines [15:10] Interrupts
DCD RTC_Alarm_IRQHandler ; RTC Alarms (A and B) Interrupt
DCD LPTIM3_IRQHandler ; LPTIM3 Interrupt
DCD SUBGHZSPI_IRQHandler ; SUBGHZSPI Interrupt
DCD IPCC_C1_RX_IRQHandler ; IPCC CPU1 RX occupied interrupt
DCD IPCC_C1_TX_IRQHandler ; IPCC CPU1 TX free interrupt
DCD HSEM_IRQHandler ; HSEM0 Interrupt
DCD I2C3_EV_IRQHandler ; I2C3 Event Interrupt
DCD I2C3_ER_IRQHandler ; I2C3 Error Interrupt
DCD SUBGHZ_Radio_IRQHandler ; SUBGHZ Radio Interrupt
DCD AES_IRQHandler ; AES Interrupt
DCD RNG_IRQHandler ; RNG1 Interrupt
DCD PKA_IRQHandler ; PKA Interrupt
DCD DMA2_Channel1_IRQHandler ; DMA2 Channel 1 Interrupt
DCD DMA2_Channel2_IRQHandler ; DMA2 Channel 2 Interrupt
DCD DMA2_Channel3_IRQHandler ; DMA2 Channel 3 Interrupt
DCD DMA2_Channel4_IRQHandler ; DMA2 Channel 4 Interrupt
DCD DMA2_Channel5_IRQHandler ; DMA2 Channel 5 Interrupt
DCD DMA2_Channel6_IRQHandler ; DMA2 Channel 6 Interrupt
DCD DMA2_Channel7_IRQHandler ; DMA2 Channel 7 Interrupt
DCD DMAMUX1_OVR_IRQHandler ; DMAMUX overrun Interrupt
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler
; First code to run after reset: calls SystemInit (clock/system setup,
; before any C runtime initialization), then jumps to the ARM C library
; entry __main, which initializes RAM/stack/heap and calls main().
; Exported WEAK so the application can provide its own reset handler.
; Does not return.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0 ; SystemInit() -- may clobber R0-R3,R12 per AAPCS
LDR R0, =__main
BX R0 ; tail-jump into the C library startup; never returns
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each core exception handler below is a WEAK infinite loop (B .), so the
; application overrides one simply by defining a strong symbol of the same
; name. Spinning in place preserves the faulting state for a debugger.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: every peripheral IRQ symbol is weakly exported here and
; all of them alias the same label chain ending in one infinite loop (B .).
; Defining a strong symbol with a matching name anywhere in the application
; overrides that single vector without touching this file.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_PVM_IRQHandler [WEAK]
EXPORT TAMP_STAMP_LSECSS_SSRU_IRQHandler [WEAK]
EXPORT RTC_WKUP_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC_IRQHandler [WEAK]
EXPORT DAC_IRQHandler [WEAK]
EXPORT C2SEV_PWR_C2H_IRQHandler [WEAK]
EXPORT COMP_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT LPUART1_IRQHandler [WEAK]
EXPORT LPTIM1_IRQHandler [WEAK]
EXPORT LPTIM2_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTC_Alarm_IRQHandler [WEAK]
EXPORT LPTIM3_IRQHandler [WEAK]
EXPORT SUBGHZSPI_IRQHandler [WEAK]
EXPORT IPCC_C1_RX_IRQHandler [WEAK]
EXPORT IPCC_C1_TX_IRQHandler [WEAK]
EXPORT HSEM_IRQHandler [WEAK]
EXPORT I2C3_EV_IRQHandler [WEAK]
EXPORT I2C3_ER_IRQHandler [WEAK]
EXPORT SUBGHZ_Radio_IRQHandler [WEAK]
EXPORT AES_IRQHandler [WEAK]
EXPORT RNG_IRQHandler [WEAK]
EXPORT PKA_IRQHandler [WEAK]
EXPORT DMA2_Channel1_IRQHandler [WEAK]
EXPORT DMA2_Channel2_IRQHandler [WEAK]
EXPORT DMA2_Channel3_IRQHandler [WEAK]
EXPORT DMA2_Channel4_IRQHandler [WEAK]
EXPORT DMA2_Channel5_IRQHandler [WEAK]
EXPORT DMA2_Channel6_IRQHandler [WEAK]
EXPORT DMA2_Channel7_IRQHandler [WEAK]
EXPORT DMAMUX1_OVR_IRQHandler [WEAK]
; All labels below resolve to the same address: the B . loop.
WWDG_IRQHandler
PVD_PVM_IRQHandler
TAMP_STAMP_LSECSS_SSRU_IRQHandler
RTC_WKUP_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC_IRQHandler
DAC_IRQHandler
C2SEV_PWR_C2H_IRQHandler
COMP_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
LPUART1_IRQHandler
LPTIM1_IRQHandler
LPTIM2_IRQHandler
EXTI15_10_IRQHandler
RTC_Alarm_IRQHandler
LPTIM3_IRQHandler
SUBGHZSPI_IRQHandler
IPCC_C1_RX_IRQHandler
IPCC_C1_TX_IRQHandler
HSEM_IRQHandler
I2C3_EV_IRQHandler
I2C3_ER_IRQHandler
SUBGHZ_Radio_IRQHandler
AES_IRQHandler
RNG_IRQHandler
PKA_IRQHandler
DMA2_Channel1_IRQHandler
DMA2_Channel2_IRQHandler
DMA2_Channel3_IRQHandler
DMA2_Channel4_IRQHandler
DMA2_Channel5_IRQHandler
DMA2_Channel6_IRQHandler
DMA2_Channel7_IRQHandler
DMAMUX1_OVR_IRQHandler
B .
ENDP
ALIGN
;********************************************************************************
; User Stack and Heap initialization
;********************************************************************************
; With MicroLIB the C library reads __initial_sp/__heap_base/__heap_limit
; directly. With the standard ARM C library, __main calls
; __user_initial_stackheap, which must return: R0 = heap base,
; R1 = stack top, R2 = heap limit, R3 = stack base.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory ; select two-region (separate stack/heap) model
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 17,624
|
Projects/NUCLEO-WL55JC/Templates_LL/SingleCore/EWARM/startup_stm32wl55xx_cm4.s
|
;********************************************************************************
;* File Name : startup_stm32wl55xx_cm4.s
;* Author : MCD Application Team
;* Description : M4 core vector table of the STM32WLxxxx devices for the
;* IAR (EWARM) toolchain.
;*
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == _iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address.
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M4 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;********************************************************************************
;* @attention
;*
;* Copyright (c) 2020 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file
;* in the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;*
;********************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
;; CSTACK is defined in the IAR linker configuration (.icf); sfe(CSTACK)
;; is the section-end address, i.e. the initial stack pointer value that
;; hardware loads from vector entry 0 at reset.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
;; Entries 0-15: Cortex-M4 core exceptions; then the 62 STM32WL55xx CM4
;; external interrupt vectors. Order is fixed by NVIC position -- do not
;; reorder or remove entries.
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window WatchDog
DCD PVD_PVM_IRQHandler ; PVD and PVM Interrupt
DCD TAMP_STAMP_LSECSS_SSRU_IRQHandler ; RTC Tamper, RTC TimeStamp, LSECSS and RTC SSRU Interrupts
DCD RTC_WKUP_IRQHandler ; RTC Wakeup Interrupt
DCD FLASH_IRQHandler ; FLASH global Interrupt
DCD RCC_IRQHandler ; RCC Interrupt
DCD EXTI0_IRQHandler ; EXTI Line 0 Interrupt
DCD EXTI1_IRQHandler ; EXTI Line 1 Interrupt
DCD EXTI2_IRQHandler ; EXTI Line 2 Interrupt
DCD EXTI3_IRQHandler ; EXTI Line 3 Interrupt
DCD EXTI4_IRQHandler ; EXTI Line 4 Interrupt
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 Interrupt
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 Interrupt
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 Interrupt
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 Interrupt
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 Interrupt
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 Interrupt
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 Interrupt
DCD ADC_IRQHandler ; ADC Interrupt
DCD DAC_IRQHandler ; DAC Interrupt
DCD C2SEV_PWR_C2H_IRQHandler ; CPU M0+ SEV Interrupt
DCD COMP_IRQHandler ; COMP1 and COMP2 Interrupts
DCD EXTI9_5_IRQHandler ; EXTI Lines [9:5] Interrupt
DCD TIM1_BRK_IRQHandler ; TIM1 Break Interrupt
DCD TIM1_UP_IRQHandler ; TIM1 Update Interrupt
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Communication Interrupts
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare Interrupt
DCD TIM2_IRQHandler ; TIM2 Global Interrupt
DCD TIM16_IRQHandler ; TIM16 Global Interrupt
DCD TIM17_IRQHandler ; TIM17 Global Interrupt
DCD I2C1_EV_IRQHandler ; I2C1 Event Interrupt
DCD I2C1_ER_IRQHandler ; I2C1 Error Interrupt
DCD I2C2_EV_IRQHandler ; I2C2 Event Interrupt
DCD I2C2_ER_IRQHandler ; I2C2 Error Interrupt
DCD SPI1_IRQHandler ; SPI1 Interrupt
DCD SPI2_IRQHandler ; SPI2 Interrupt
DCD USART1_IRQHandler ; USART1 Interrupt
DCD USART2_IRQHandler ; USART2 Interrupt
DCD LPUART1_IRQHandler ; LPUART1 Interrupt
DCD LPTIM1_IRQHandler ; LPTIM1 Global Interrupt
DCD LPTIM2_IRQHandler ; LPTIM2 Global Interrupt
DCD EXTI15_10_IRQHandler ; EXTI Lines [15:10] Interrupt
DCD RTC_Alarm_IRQHandler ; RTC Alarms (A and B) Interrupt
DCD LPTIM3_IRQHandler ; LPTIM3 Global Interrupt
DCD SUBGHZSPI_IRQHandler ; SUBGHZSPI Interrupt
DCD IPCC_C1_RX_IRQHandler ; IPCC CPU1 RX occupied interrupt
DCD IPCC_C1_TX_IRQHandler ; IPCC CPU1 TX free interrupt
DCD HSEM_IRQHandler ; HSEM0 Interrupt
DCD I2C3_EV_IRQHandler ; I2C3 Event Interrupt
DCD I2C3_ER_IRQHandler ; I2C3 Error Interrupt
DCD SUBGHZ_Radio_IRQHandler ; SUBGHZ Radio Interrupt
DCD AES_IRQHandler ; AES Interrupt
DCD RNG_IRQHandler ; RNG1 Interrupt
DCD PKA_IRQHandler ; PKA Interrupt
DCD DMA2_Channel1_IRQHandler ; DMA2 Channel 1 Interrupt
DCD DMA2_Channel2_IRQHandler ; DMA2 Channel 2 Interrupt
DCD DMA2_Channel3_IRQHandler ; DMA2 Channel 3 Interrupt
DCD DMA2_Channel4_IRQHandler ; DMA2 Channel 4 Interrupt
DCD DMA2_Channel5_IRQHandler ; DMA2 Channel 5 Interrupt
DCD DMA2_Channel6_IRQHandler ; DMA2 Channel 6 Interrupt
DCD DMA2_Channel7_IRQHandler ; DMA2 Channel 7 Interrupt
DCD DMAMUX1_OVR_IRQHandler ; DMAMUX overrun Interrupt
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
;; Reset handler: calls SystemInit (clock/system setup before C runtime
;; init) then jumps to the IAR runtime entry __iar_program_start, which
;; initializes data sections and calls main(). PUBWEAK so an application
;; can supply its own strong Reset_Handler. Does not return.
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0 ; SystemInit()
LDR R0, =__iar_program_start
BX R0 ; tail-jump into the IAR C startup; never returns
;; Each handler below is PUBWEAK and spins on itself (B <self>), preserving
;; the faulting/interrupt state for the debugger. An application overrides
;; any one of them by defining a strong symbol with the same name; the IAR
;; linker then discards the unused weak stub (NOROOT sections).
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK MemManage_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
MemManage_Handler
B MemManage_Handler
PUBWEAK BusFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
BusFault_Handler
B BusFault_Handler
PUBWEAK UsageFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
UsageFault_Handler
B UsageFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK DebugMon_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
DebugMon_Handler
B DebugMon_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_PVM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_PVM_IRQHandler
B PVD_PVM_IRQHandler
PUBWEAK TAMP_STAMP_LSECSS_SSRU_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TAMP_STAMP_LSECSS_SSRU_IRQHandler
B TAMP_STAMP_LSECSS_SSRU_IRQHandler
PUBWEAK RTC_WKUP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_WKUP_IRQHandler
B RTC_WKUP_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_IRQHandler
B RCC_IRQHandler
PUBWEAK EXTI0_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_IRQHandler
B EXTI0_IRQHandler
PUBWEAK EXTI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI1_IRQHandler
B EXTI1_IRQHandler
PUBWEAK EXTI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_IRQHandler
B EXTI2_IRQHandler
PUBWEAK EXTI3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI3_IRQHandler
B EXTI3_IRQHandler
PUBWEAK EXTI4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_IRQHandler
B EXTI4_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_IRQHandler
B DMA1_Channel2_IRQHandler
PUBWEAK DMA1_Channel3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel3_IRQHandler
B DMA1_Channel3_IRQHandler
PUBWEAK DMA1_Channel4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_IRQHandler
B DMA1_Channel4_IRQHandler
PUBWEAK DMA1_Channel5_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel5_IRQHandler
B DMA1_Channel5_IRQHandler
PUBWEAK DMA1_Channel6_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel6_IRQHandler
B DMA1_Channel6_IRQHandler
PUBWEAK DMA1_Channel7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel7_IRQHandler
B DMA1_Channel7_IRQHandler
PUBWEAK ADC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC_IRQHandler
B ADC_IRQHandler
PUBWEAK DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DAC_IRQHandler
B DAC_IRQHandler
PUBWEAK C2SEV_PWR_C2H_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
C2SEV_PWR_C2H_IRQHandler
B C2SEV_PWR_C2H_IRQHandler
PUBWEAK COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
COMP_IRQHandler
B COMP_IRQHandler
PUBWEAK EXTI9_5_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI9_5_IRQHandler
B EXTI9_5_IRQHandler
PUBWEAK TIM1_BRK_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_IRQHandler
B TIM1_BRK_IRQHandler
PUBWEAK TIM1_UP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_UP_IRQHandler
B TIM1_UP_IRQHandler
PUBWEAK TIM1_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_TRG_COM_IRQHandler
B TIM1_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_EV_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_EV_IRQHandler
B I2C1_EV_IRQHandler
PUBWEAK I2C1_ER_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_ER_IRQHandler
B I2C1_ER_IRQHandler
PUBWEAK I2C2_EV_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_EV_IRQHandler
B I2C2_EV_IRQHandler
PUBWEAK I2C2_ER_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_ER_IRQHandler
B I2C2_ER_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK LPUART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
LPUART1_IRQHandler
B LPUART1_IRQHandler
PUBWEAK LPTIM1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
LPTIM1_IRQHandler
B LPTIM1_IRQHandler
PUBWEAK LPTIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
LPTIM2_IRQHandler
B LPTIM2_IRQHandler
PUBWEAK EXTI15_10_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI15_10_IRQHandler
B EXTI15_10_IRQHandler
PUBWEAK RTC_Alarm_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_Alarm_IRQHandler
B RTC_Alarm_IRQHandler
PUBWEAK LPTIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
LPTIM3_IRQHandler
B LPTIM3_IRQHandler
PUBWEAK SUBGHZSPI_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SUBGHZSPI_IRQHandler
B SUBGHZSPI_IRQHandler
PUBWEAK IPCC_C1_RX_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
IPCC_C1_RX_IRQHandler
B IPCC_C1_RX_IRQHandler
PUBWEAK IPCC_C1_TX_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
IPCC_C1_TX_IRQHandler
B IPCC_C1_TX_IRQHandler
PUBWEAK HSEM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
HSEM_IRQHandler
B HSEM_IRQHandler
PUBWEAK I2C3_EV_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C3_EV_IRQHandler
B I2C3_EV_IRQHandler
PUBWEAK I2C3_ER_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C3_ER_IRQHandler
B I2C3_ER_IRQHandler
PUBWEAK SUBGHZ_Radio_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SUBGHZ_Radio_IRQHandler
B SUBGHZ_Radio_IRQHandler
PUBWEAK AES_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
AES_IRQHandler
B AES_IRQHandler
PUBWEAK RNG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RNG_IRQHandler
B RNG_IRQHandler
PUBWEAK PKA_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PKA_IRQHandler
B PKA_IRQHandler
PUBWEAK DMA2_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel1_IRQHandler
B DMA2_Channel1_IRQHandler
PUBWEAK DMA2_Channel2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel2_IRQHandler
B DMA2_Channel2_IRQHandler
PUBWEAK DMA2_Channel3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel3_IRQHandler
B DMA2_Channel3_IRQHandler
PUBWEAK DMA2_Channel4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel4_IRQHandler
B DMA2_Channel4_IRQHandler
PUBWEAK DMA2_Channel5_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel5_IRQHandler
B DMA2_Channel5_IRQHandler
PUBWEAK DMA2_Channel6_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel6_IRQHandler
B DMA2_Channel6_IRQHandler
PUBWEAK DMA2_Channel7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA2_Channel7_IRQHandler
B DMA2_Channel7_IRQHandler
PUBWEAK DMAMUX1_OVR_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMAMUX1_OVR_IRQHandler
B DMAMUX1_OVR_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 16,279
|
Projects/NUCLEO-WL55JC/Templates_LL/SingleCore/STM32CubeIDE/Application/Startup/startup_stm32wl55jcix.s
|
/**
******************************************************************************
* @file startup_stm32wl55xx_cm4.s
* @author MCD Application Team
* @brief STM32WL55xx devices Cortex-M4 vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address,
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2020 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* GCC/GAS startup preamble: unified syntax, Cortex-M4, software FP ABI,
   Thumb state. The .word directives below only reference linker-script
   symbols (they emit the addresses into the current section; presumably
   kept to force the symbols to be resolved -- TODO confirm intent, this
   matches the stock ST template). */
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
/**
 * @brief This is the code that gets called when the processor first
 * starts execution following a reset event. Only the absolutely
 * necessary set is performed, after which the application
 * supplied main() routine is called.
 *
 * Sequence: set SP, SystemInit(), copy .data from flash, zero .bss,
 * run C++/C constructors (__libc_init_array), call main(). Declared
 * weak so an application may supply its own reset handler.
 * @param None
 * @retval : None
 */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Call the clock system initialization function.*/
bl SystemInit
/* Copy the data segment initializers from flash to SRAM */
/* r0 = _sdata (dest), r1 = _edata (dest end), r2 = _sidata (src), r3 = byte offset */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3] /* word from flash at src + offset */
str r4, [r0, r3] /* to SRAM at dest + offset */
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3 /* r4 = current dest address */
cmp r4, r1
bcc CopyDataInit /* loop while dest < _edata (unsigned) */
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2] /* *r2 = 0 */
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss /* loop while r2 < _ebss (unsigned) */
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* trap here if main() ever returns */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
/* Default_Handler: target of every weakly-aliased exception/IRQ symbol.
   Spins forever, preserving system state for examination by a debugger. */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The STM32WL55xx Cortex-M4 vector table. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* Vector table: entries 0-15 are the Cortex-M4 core exceptions, followed by
   the 62 STM32WL55xx CM4 external interrupt vectors. Entry order is fixed
   by NVIC position -- never reorder or remove entries. The linker script
   places .isr_vector at the boot address. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window Watchdog interrupt */
.word PVD_PVM_IRQHandler /* PVD and PVM interrupt through EXTI */
.word TAMP_STAMP_LSECSS_SSRU_IRQHandler /* RTC Tamper, RTC TimeStamp, LSECSS and RTC SSRU int.*/
.word RTC_WKUP_IRQHandler /* RTC wakeup interrupt through EXTI[19] */
.word FLASH_IRQHandler /* Flash memory global interrupt and Flash memory ECC */
.word RCC_IRQHandler /* RCC global interrupt */
.word EXTI0_IRQHandler /* EXTI line 0 interrupt */
.word EXTI1_IRQHandler /* EXTI line 1 interrupt */
.word EXTI2_IRQHandler /* EXTI line 2 interrupt */
.word EXTI3_IRQHandler /* EXTI line 3 interrupt */
.word EXTI4_IRQHandler /* EXTI line 4 interrupt */
.word DMA1_Channel1_IRQHandler /* DMA1 channel 1 interrupt */
.word DMA1_Channel2_IRQHandler /* DMA1 channel 2 interrupt */
.word DMA1_Channel3_IRQHandler /* DMA1 channel 3 interrupt */
.word DMA1_Channel4_IRQHandler /* DMA1 channel 4 interrupt */
.word DMA1_Channel5_IRQHandler /* DMA1 channel 5 interrupt */
.word DMA1_Channel6_IRQHandler /* DMA1 channel 6 interrupt */
.word DMA1_Channel7_IRQHandler /* DMA1 channel 7 interrupt */
.word ADC_IRQHandler /* ADC interrupt */
.word DAC_IRQHandler /* DAC interrupt */
.word C2SEV_PWR_C2H_IRQHandler /* CPU M0+ SEV Interrupt */
.word COMP_IRQHandler /* COMP1 and COMP2 interrupt through EXTI */
.word EXTI9_5_IRQHandler /* EXTI line 9_5 interrupt */
.word TIM1_BRK_IRQHandler /* Timer 1 break interrupt */
.word TIM1_UP_IRQHandler /* Timer 1 Update */
.word TIM1_TRG_COM_IRQHandler /* Timer 1 trigger and communication */
.word TIM1_CC_IRQHandler /* Timer 1 capture compare interrupt */
.word TIM2_IRQHandler /* TIM2 global interrupt */
.word TIM16_IRQHandler /* Timer 16 global interrupt */
.word TIM17_IRQHandler /* Timer 17 global interrupt */
.word I2C1_EV_IRQHandler /* I2C1 event interrupt */
.word I2C1_ER_IRQHandler /* I2C1 error interrupt */
.word I2C2_EV_IRQHandler /* I2C2 event interrupt */
.word I2C2_ER_IRQHandler /* I2C2 error interrupt */
.word SPI1_IRQHandler /* SPI1 global interrupt */
.word SPI2_IRQHandler /* SPI2 global interrupt */
.word USART1_IRQHandler /* USART1 global interrupt */
.word USART2_IRQHandler /* USART2 global interrupt */
.word LPUART1_IRQHandler /* LPUART1 global interrupt */
.word LPTIM1_IRQHandler /* LPtimer 1 global interrupt */
.word LPTIM2_IRQHandler /* LPtimer 2 global interrupt */
.word EXTI15_10_IRQHandler /* EXTI line [15:10] interrupt through EXTI */
.word RTC_Alarm_IRQHandler /* RTC Alarms A & B interrupt */
.word LPTIM3_IRQHandler /* LPtimer 3 global interrupt */
.word SUBGHZSPI_IRQHandler /* SUBGHZSPI global interrupt */
.word IPCC_C1_RX_IRQHandler /* IPCC CPU1 RX occupied interrupt */
.word IPCC_C1_TX_IRQHandler /* IPCC CPU1 TX free interrupt */
.word HSEM_IRQHandler /* Semaphore interrupt 0 to CPU1 */
.word I2C3_EV_IRQHandler /* I2C3 event interrupt */
.word I2C3_ER_IRQHandler /* I2C3 error interrupt */
.word SUBGHZ_Radio_IRQHandler /* Radio IRQs RFBUSY interrupt through EXTI */
.word AES_IRQHandler /* AES global interrupt */
.word RNG_IRQHandler /* RNG interrupt */
.word PKA_IRQHandler /* PKA interrupt */
.word DMA2_Channel1_IRQHandler /* DMA2 channel 1 interrupt */
.word DMA2_Channel2_IRQHandler /* DMA2 channel 2 interrupt */
.word DMA2_Channel3_IRQHandler /* DMA2 channel 3 interrupt */
.word DMA2_Channel4_IRQHandler /* DMA2 channel 4 interrupt */
.word DMA2_Channel5_IRQHandler /* DMA2 channel 5 interrupt */
.word DMA2_Channel6_IRQHandler /* DMA2 channel 6 interrupt */
.word DMA2_Channel7_IRQHandler /* DMA2 channel 7 interrupt */
.word DMAMUX1_OVR_IRQHandler /* DMAMUX overrun interrupt */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition. Each pair below declares the handler symbol weak and
* points it at Default_Handler; linking an application-defined handler of
* the same name silently replaces the alias.
*
*******************************************************************************/
/* Core (system) exception handlers */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
/* Peripheral (external) interrupt handlers */
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_PVM_IRQHandler
.thumb_set PVD_PVM_IRQHandler,Default_Handler
.weak TAMP_STAMP_LSECSS_SSRU_IRQHandler
.thumb_set TAMP_STAMP_LSECSS_SSRU_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_IRQHandler
.thumb_set DMA1_Channel2_IRQHandler,Default_Handler
.weak DMA1_Channel3_IRQHandler
.thumb_set DMA1_Channel3_IRQHandler,Default_Handler
.weak DMA1_Channel4_IRQHandler
.thumb_set DMA1_Channel4_IRQHandler,Default_Handler
.weak DMA1_Channel5_IRQHandler
.thumb_set DMA1_Channel5_IRQHandler,Default_Handler
.weak DMA1_Channel6_IRQHandler
.thumb_set DMA1_Channel6_IRQHandler,Default_Handler
.weak DMA1_Channel7_IRQHandler
.thumb_set DMA1_Channel7_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak DAC_IRQHandler
.thumb_set DAC_IRQHandler,Default_Handler
.weak C2SEV_PWR_C2H_IRQHandler
.thumb_set C2SEV_PWR_C2H_IRQHandler,Default_Handler
.weak COMP_IRQHandler
.thumb_set COMP_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_IRQHandler
.thumb_set TIM1_BRK_IRQHandler,Default_Handler
.weak TIM1_UP_IRQHandler
.thumb_set TIM1_UP_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_IRQHandler
.thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak LPUART1_IRQHandler
.thumb_set LPUART1_IRQHandler,Default_Handler
.weak LPTIM1_IRQHandler
.thumb_set LPTIM1_IRQHandler,Default_Handler
.weak LPTIM2_IRQHandler
.thumb_set LPTIM2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak LPTIM3_IRQHandler
.thumb_set LPTIM3_IRQHandler,Default_Handler
.weak SUBGHZSPI_IRQHandler
.thumb_set SUBGHZSPI_IRQHandler,Default_Handler
.weak IPCC_C1_RX_IRQHandler
.thumb_set IPCC_C1_RX_IRQHandler,Default_Handler
.weak IPCC_C1_TX_IRQHandler
.thumb_set IPCC_C1_TX_IRQHandler,Default_Handler
.weak HSEM_IRQHandler
.thumb_set HSEM_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak SUBGHZ_Radio_IRQHandler
.thumb_set SUBGHZ_Radio_IRQHandler,Default_Handler
.weak AES_IRQHandler
.thumb_set AES_IRQHandler,Default_Handler
.weak RNG_IRQHandler
.thumb_set RNG_IRQHandler,Default_Handler
.weak PKA_IRQHandler
.thumb_set PKA_IRQHandler,Default_Handler
.weak DMA2_Channel1_IRQHandler
.thumb_set DMA2_Channel1_IRQHandler,Default_Handler
.weak DMA2_Channel2_IRQHandler
.thumb_set DMA2_Channel2_IRQHandler,Default_Handler
.weak DMA2_Channel3_IRQHandler
.thumb_set DMA2_Channel3_IRQHandler,Default_Handler
.weak DMA2_Channel4_IRQHandler
.thumb_set DMA2_Channel4_IRQHandler,Default_Handler
.weak DMA2_Channel5_IRQHandler
.thumb_set DMA2_Channel5_IRQHandler,Default_Handler
.weak DMA2_Channel6_IRQHandler
.thumb_set DMA2_Channel6_IRQHandler,Default_Handler
.weak DMA2_Channel7_IRQHandler
.thumb_set DMA2_Channel7_IRQHandler,Default_Handler
.weak DMAMUX1_OVR_IRQHandler
.thumb_set DMAMUX1_OVR_IRQHandler,Default_Handler
/* SystemInit is declared weak so the application may supply its own */
.weak SystemInit
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 1,347
|
Middlewares/ST/STM32_Secure_Engine/Core/se_stack_smuggler_GNU_CM0PLUS.s
|
/**
******************************************************************************
* @file se_stack_smuggler_GNU_CM0PLUS.s
* @author MCD Application Team
* @brief Switch SP from SB (Secure Boot) to the SE (Secure Engine) RAM region
* around a call to SE_CallGateService, then restore the caller's SP.
******************************************************************************
* @attention
*
* Copyright (c) 2017 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file in
* the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.section .text
.global SE_SP_SMUGGLE
.syntax unified
.thumb
//SE_SP_SMUGGLE(SE_FunctionIDTypeDef eID, SE_StatusTypeDef *peSE_Status, ...)
//R0 and R1 are used to call with new stack SE_CallGateService
//Cortex-M0+ (Thumb-1) variant: SP cannot be the destination of LDR here,
//so the new stack top is staged in low register R6 and moved into SP.
.global __ICFEDIT_SE_region_RAM_stack_top__
.global SE_CallGateService
SE_SP_SMUGGLE:
// save working registers and return address on the caller stack (SP - 12)
PUSH {R6, R7, LR}
// retrieve SP value on R7 (caller stack pointer, restored on exit)
MOV R7, SP
// CHANGE SP: switch to the top of the SE private RAM stack region
LDR R6, =__ICFEDIT_SE_region_RAM_stack_top__
MOV SP, R6
// Let 4 bytes to store appli vector
SUB SP, SP, #4
// push R7 (previous caller SP) on the new stack so it survives the call
PUSH {R7}
// R0 (eID) and R1 (peSE_Status) pass through unchanged to the gate service
BL SE_CallGateService
// retrieve previous stack pointer from the SE stack
POP {R7}
// restore the caller's stack pointer
MOV SP, R7
// return: restore saved registers and branch back via popped PC
POP {R6, R7, PC}
.end
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 1,340
|
Middlewares/ST/STM32_Secure_Engine/Core/se_stack_smuggler_GNU.s
|
/**
******************************************************************************
* @file se_stack_smuggler_GNU.s
* @author MCD Application Team
* @brief Switch SP from SB (Secure Boot) to the SE (Secure Engine) RAM region
* around a call to SE_CallGateService, then restore the caller's SP.
******************************************************************************
* @attention
*
* Copyright (c) 2017 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file in
* the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.section .text
.global SE_SP_SMUGGLE
.syntax unified
.thumb
//SE_SP_SMUGGLE(SE_FunctionIDTypeDef eID, SE_StatusTypeDef *peSE_Status, ...)
//R0 and R1 are used to call with new stack SE_CallGateService
.global __ICFEDIT_SE_region_RAM_stack_top__
.global SE_CallGateService
SE_SP_SMUGGLE:
// save working register and return address on the caller stack: SP - 8
PUSH {R11,LR}
// retrieve SP value on R11 (caller stack pointer, restored on exit)
MOV R11, SP
// CHANGE SP: switch directly to the top of the SE private RAM stack region
LDR SP, =__ICFEDIT_SE_region_RAM_stack_top__
// Let 4 byte to store appli vector address
SUB SP, SP, #4
// push R11 (previous caller SP) on the new stack so it survives the call
PUSH {R11}
// R0 (eID) and R1 (peSE_Status) pass through unchanged to the gate service
// NOTE(review): BLX with an immediate label relies on the linker relaxing
// it to BL for a Thumb target — confirm toolchain behavior
BLX SE_CallGateService
// retrieve previous stack pointer from the SE stack
POP {R11}
// restore the caller's stack pointer
MOV SP, R11
POP {R11, LR}
// return to caller
BX LR
.end
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 1,320
|
Middlewares/ST/STM32_Secure_Engine/Core/se_stack_smuggler_IAR_CM0PLUS.s
|
;/******************************************************************************
;* File Name : se_stack_smuggler_IAR_CM0PLUS.s
;* Author : MCD Application Team
;* Description : Switch SP from SB (Secure Boot) to the SE (Secure Engine)
;* RAM region around a call to SE_CallGateService, then restore
;* the caller's SP.
;********************************************************************************
;* @attention
;*
;* Copyright (c) 2020 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file in
;* the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;*
;******************************************************************************
;
SECTION .text:CODE
EXPORT SE_SP_SMUGGLE
;SE_SP_SMUGGLE(SE_FunctionIDTypeDef eID, SE_StatusTypeDef *peSE_Status, ...)
;R0 and R1 are used to call with new stack SE_CallGateService
;Cortex-M0+ (Thumb-1) variant: SP cannot be loaded directly, so the new
;stack top is staged in low register R6 and moved into SP.
IMPORT __ICFEDIT_SE_region_RAM_stack_top__
IMPORT SE_CallGateService
SE_SP_SMUGGLE
; save working registers and return address on the caller stack (SP - 12)
PUSH {R6, R7, LR}
; retrieve SP value on R7 (caller stack pointer, restored on exit)
MOV R7, SP
; CHANGE SP: switch to the top of the SE private RAM stack region
LDR R6, =__ICFEDIT_SE_region_RAM_stack_top__
MOV SP, R6
; Let 4 bytes to store appli vector
SUB SP, SP, #4
; push R7 (previous caller SP) on the new stack so it survives the call
PUSH {R7}
; R0 (eID) and R1 (peSE_Status) pass through unchanged to the gate service
BL SE_CallGateService
; retrieve previous stack pointer from the SE stack
POP {R7}
; restore the caller's stack pointer
MOV SP, R7
; return: restore saved registers and branch back via popped PC
POP {R6, R7, PC}
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 1,306
|
Middlewares/ST/STM32_Secure_Engine/Core/se_stack_smuggler_IAR.s
|
;/******************************************************************************
;* File Name : se_stack_smuggler_IAR.s
;* Author : MCD Application Team
;* Description : Switch SP from SB (Secure Boot) to the SE (Secure Engine)
;* RAM region around a call to SE_CallGateService, then restore
;* the caller's SP.
;*******************************************************************************
;* @attention
;*
;* Copyright (c) 2020 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file in
;* the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;*
;*******************************************************************************
;
SECTION .text:CODE
EXPORT SE_SP_SMUGGLE
;SE_SP_SMUGGLE(SE_FunctionIDTypeDef eID, SE_StatusTypeDef *peSE_Status, ...)
;R0 and R1 are used to call with new stack SE_CallGateService
IMPORT __ICFEDIT_SE_region_RAM_stack_top__
IMPORT SE_CallGateService
SE_SP_SMUGGLE
; save working register and return address on the caller stack: SP - 8
PUSH {R11,LR}
; retrieve SP value on R11 (caller stack pointer, restored on exit)
MOV R11, SP
; CHANGE SP: switch directly to the top of the SE private RAM stack region
LDR SP, =__ICFEDIT_SE_region_RAM_stack_top__
; Let 4 bytes to store appli vector
SUB SP, SP, #4
; push R11 (previous caller SP) on the new stack so it survives the call
PUSH {R11}
; R0 (eID) and R1 (peSE_Status) pass through unchanged to the gate service
BLX SE_CallGateService
; retrieve previous stack pointer from the SE stack
POP {R11}
; restore the caller's stack pointer
MOV SP, R11
POP {R11, LR}
; return to caller
BX LR
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 1,363
|
Middlewares/ST/STM32_Secure_Engine/Core/se_stack_smuggler_ARM.s
|
;/******************************************************************************
;* File Name : se_stack_smuggler_ARM.s
;* Author : MCD Application Team
;* Description : Switch SP from SB (Secure Boot) to the SE (Secure Engine)
;* RAM region around a call to SE_CallGateService, then restore
;* the caller's SP.
;*******************************************************************************
;* @attention
;*
;* Copyright (c) 2020 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file in
;* the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;*
;*******************************************************************************
;
AREA |.text|, CODE
EXPORT SE_SP_SMUGGLE
;SE_SP_SMUGGLE(SE_FunctionIDTypeDef eID, SE_StatusTypeDef *peSE_Status, ...)
;R0 and R1 are used to call with new stack SE_CallGateService
PRESERVE8 {TRUE}
IMPORT |Image$$SE_region_RAM$$Base|
IMPORT SE_CallGateService
SE_SP_SMUGGLE
; save working register and return address on the caller stack: SP - 8
PUSH {R11,LR}
; retrieve SP value on R11 (caller stack pointer, restored on exit)
MOV R11, SP
; CHANGE SP: switch to the SE private RAM stack
; NOTE(review): the linker symbol is the SE RAM region *Base* — confirm the
; scatter file arranges for it to be a valid (descending) stack top
LDR SP, =|Image$$SE_region_RAM$$Base|
; Let 4 bytes to store appli vector
SUB SP, SP, #4
; push R11 (previous caller SP) on the new stack so it survives the call
PUSH {R11}
; R0 (eID) and R1 (peSE_Status) pass through unchanged to the gate service
BL SE_CallGateService
; retrieve previous stack pointer from the SE stack
POP {R11}
; restore the caller's stack pointer
MOV SP, R11
POP {R11, LR}
; return to caller
BX LR
ALIGN 4 ; now aligned on 4-byte boundary
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 2,727
|
Middlewares/ST/STM32_Secure_Engine/Core/se_interface_exception_ARM.s
|
;/******************************************************************************
;* File Name : se_interface_exception.s
;* Author : MCD Application Team
;* Description : This file defines function to handle user interrupts
;* raised in Firewall: the wrapper runs the application's IT
;* handler outside the firewall, then re-enters the SE call gate.
;*******************************************************************************
;* @attention
;*
;* Copyright (c) 2020 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file in
;* the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;*
;*******************************************************************************
;
AREA |.SE_IF_Code_Entry|, CODE
EXPORT SE_UserHandlerWrapper
IMPORT DummyMemAccess
IMPORT SeCallGateStatusParam
IMPORT SeCallGateAddress
; ******************************************
; Function Name : SE_UserHandlerWrapper
; Description : call Appli IT Handler
; input : R0 : @ Appli IT Handler
; : R1 : Primask value
; internal : R3
; output : R0 : SE_CallGate 1st input param: Service ID
; : R1 : SE_CallGate 2nd input param: return status var @
; : R2 : SE_CallGate 3rd input param: Primask
; return : None (control re-enters the SE call gate; never falls through)
; ******************************************
SE_UserHandlerWrapper
; restore Primask (interrupt masking state saved before leaving the firewall)
MSR PRIMASK, R1
; Specific B-L4S5I-IOT01A : force a SRAM data access outside FWALL protected SRAM to close correctly the FWALL
; See errata sheet: ES0393 - Rev 6 - October 2019
; DummyMemAccess variable used to force data access outside FWALL protected SRAM1
; and outside the 18 LSB range protected by FWALL.
LDR R2, =DummyMemAccess
LDR R2, [R2]
; call User IT Handler
; SE_UserHandlerWrapper shall be mapped at @ bit[4] = 1 in linker script file
; the purpose is to have LR bit [4] = 1 after executing the next instruction BLX R0
; So be careful not to modify code here that changes LR bit [4] after executing the next instruction BLX R0
BLX R0
; disable IT
CPSID i
; set input param for SE_CallGate
MOV R0, #0x00001000 ; SE_EXIT_INTERRUPT
LDR R1, =SeCallGateStatusParam
MOV R2, #0xFFFFFFFF ; invalid Primask
; re-enter in firewall via SE_CallGate
LDR R3, =SeCallGateAddress
LDR R3, [R3]
ADD R3, R3, #1 ; set bit[0] (Thumb state) for the indirect BLX
BLX R3 ; LR shall be updated otherwise __IS_CALLER_SE_IF will fail in Se_CallGate
; we shall not raise this point
; B NVIC_SystemReset
ALIGN 4 ; now aligned on 4-byte boundary
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 1,375
|
Middlewares/ST/STM32_Secure_Engine/Core/se_stack_smuggler_ARM_CM0PLUS.s
|
;/******************************************************************************
;* File Name : se_stack_smuggler_ARM_CM0PLUS.s
;* Author : MCD Application Team
;* Description : Switch SP from SB (Secure Boot) to the SE (Secure Engine)
;* RAM region around a call to SE_CallGateService, then restore
;* the caller's SP.
;********************************************************************************
;* @attention
;*
;* Copyright (c) 2020 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file in
;* the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;*
;******************************************************************************
AREA |.text|, CODE
EXPORT SE_SP_SMUGGLE
;SE_SP_SMUGGLE(SE_FunctionIDTypeDef eID, SE_StatusTypeDef *peSE_Status, ...)
;R0 and R1 are used to call with new stack SE_CallGateService
;Cortex-M0+ (Thumb-1) variant: SP cannot be loaded directly, so the new
;stack top is staged in low register R6 and moved into SP.
PRESERVE8 {TRUE}
IMPORT |Image$$SE_region_RAM$$Base|
IMPORT SE_CallGateService
SE_SP_SMUGGLE
; save working registers and return address on the caller stack (SP - 12)
PUSH {R6, R7, LR}
; retrieve SP value on R7 (caller stack pointer, restored on exit)
MOV R7, SP
; CHANGE SP: switch to the SE private RAM stack
; NOTE(review): the linker symbol is the SE RAM region *Base* — confirm the
; scatter file arranges for it to be a valid (descending) stack top
LDR R6, =|Image$$SE_region_RAM$$Base|
MOV SP, R6
; Let 4 bytes to store appli vector
SUB SP, SP, #4
; push R7 (previous caller SP) on the new stack so it survives the call
PUSH {R7}
; R0 (eID) and R1 (peSE_Status) pass through unchanged to the gate service
BL SE_CallGateService
; retrieve previous stack pointer from the SE stack
POP {R7}
; restore the caller's stack pointer
MOV SP, R7
; return: restore saved registers and branch back via popped PC
POP {R6, R7, PC}
ALIGN 4 ; now aligned on 4-byte boundary
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 2,821
|
Middlewares/ST/STM32_Secure_Engine/Core/se_interface_exception_GNU.s
|
/*******************************************************************************
* File Name : se_interface_exception.s
* Author : MCD Application Team
* Description : This file defines function to handle user interrupts
* raised in Firewall: the wrapper runs the application's IT
* handler outside the firewall, then re-enters the SE call gate.
*******************************************************************************
* @attention
*
* Copyright (c) 2020 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file in
* the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
*******************************************************************************
*/
.section .text.SE_UserHandlerWrapper
.global DummyMemAccess
.global SE_UserHandlerWrapper
.global SeCallGateStatusParam
.global __ICFEDIT_SE_CallGate_region_ROM_start__
.syntax unified
.thumb
/******************************************
* Function Name : SE_UserHandlerWrapper
* Description : call Appli IT Handler
* input : R0 : @ Appli IT Handler
* : R1 : Primask value
* internal : R3
* output : R0 : SE_CallGate 1st input param: Service ID
* : R1 : SE_CallGate 2nd input param: return status var @
* : R2 : SE_CallGate 3rd input param: Primask
* return : None (control re-enters the SE call gate; never falls through)
******************************************
*/
SE_UserHandlerWrapper:
// restore Primask (interrupt masking state saved before leaving the firewall)
MSR PRIMASK, R1
// Specific B-L4S5I-IOT01A : force a SRAM data access outside FWALL protected SRAM to close correctly the FWALL
// See errata sheet: ES0393 - Rev 6 - October 2019
// DummyMemAccess variable used to force data access outside FWALL protected SRAM1
// and outside the 18 LSB range protected by FWALL.
LDR R2, =DummyMemAccess
LDR R2, [R2]
// call User IT Handler
// SE_UserHandlerWrapper shall be mapped at @ bit[4] = 1 in linker script file
// the purpose is to have LR bit [4] = 1 after executing the next instruction BLX R0
// So be careful not to modify code here that changes LR bit [4] after executing the next instruction BLX R0
BLX R0
// disable IT
CPSID i
// set input param for SE_CallGate
MOV R0, #0x00001000 // SE_EXIT_INTERRUPT
LDR R1, =SeCallGateStatusParam
MOV R2, #0xFFFFFFFF // invalid Primask
// re-enter in firewall via SE_CallGate
LDR R3, =__ICFEDIT_SE_CallGate_region_ROM_start__
// NOTE(review): +5 = skip 4 bytes at the region start, +1 for the Thumb bit —
// the IAR variant adds only #1; confirm against this toolchain's linker layout
ADD R3, R3, #5
BLX R3 // LR shall be updated otherwise __IS_CALLER_SE_IF will fail in Se_CallGate
// we shall not raise this point
// B NVIC_SystemReset
.end
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 4,920
|
Middlewares/ST/STM32_Secure_Engine/Core/se_stack_smuggler_it_mngt_IAR.s
|
;/******************************************************************************
;* File Name : se_stack_smuggler_it_mngt_IAR.s
;* Author : MCD Application Team
;* Description : Switch SP from SB to SE RAM region, with interrupt
;* management support (SE_EXIT_INTERRUPT service handling).
;*******************************************************************************
;* @attention
;*
;* Copyright (c) 2020 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file in
;* the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;*
;*******************************************************************************
;
SECTION .text:CODE
EXPORT SE_SP_SMUGGLE
EXPORT SE_ExitHandler_Service
IMPORT AppliActiveSp
IMPORT AppliMsp
IMPORT SeMsp
IMPORT SeExcEntrySp
IMPORT __ICFEDIT_SE_region_RAM_stack_top__
IMPORT SE_CallGateService
IMPORT PrimaskValue
IMPORT AppliActiveSpMode
IMPORT IntHand
IMPORT SeExcReturn
; ******************************************
; Function Name : SE_SP_SMUGGLE
; Description : save Appli SP, call SE service, restore Appli SP
; input : R0 : eID (0x00001000 = SE_EXIT_INTERRUPT)
; : R1 : peSE_Status
; : R2 : arguments
; internal : R4, R6 : working register to update AppliMsp, AppliActiveSp, PRIMASK, CONTROL
; : R5 : Appli Active SP
; return : R0 : status
; ******************************************
SE_SP_SMUGGLE
; save LR
PUSH {LR}
; save working registers
PUSH {R4, R5, R6}
; save Appli MSP if SE_CallGate called from user appli (no interrupt)
CMP R0, #0x00001000 ; compare to SE_EXIT_INTERRUPT
ITTT NE
MRSNE R5, MSP
LDRNE R4, =AppliMsp
STRNE R5, [R4]
; save Appli Active SP if SE_CallGate called from user appli (no interrupt)
CMP R0, #0x00001000 ; compare to SE_EXIT_INTERRUPT
ITTT NE
MOVNE R5, SP
LDRNE R4, =AppliActiveSp
STRNE R5, [R4]
; set SP to initial Appli MSP in case of interrupt
CMP R0, #0x00001000 ; compare to SE_EXIT_INTERRUPT
ITT EQ
LDREQ R4, =AppliMsp
LDREQ SP, [R4]
; set current Active Stack Pointer to MSP (SE manages only MSP)
; clear CONTROL.SPSEL (bit 1) so SP aliases MSP
MRS R4, CONTROL
AND R4, R4, #0xFFFFFFFD
MSR CONTROL, R4
ISB
; set Active SP to SE Stack
CMP R0, #0x00001000 ; compare to SE_EXIT_INTERRUPT
ITTE EQ
LDREQ R4, =SeMsp ; in case service = SE_EXIT_INTERRUPT
LDREQ SP, [R4]
LDRNE SP, =__ICFEDIT_SE_region_RAM_stack_top__ ; in case service != SE_EXIT_INTERRUPT
; restore primask in case service != SE_EXIT_INTERRUPT
CMP R0, #0x00001000 ; compare service to SE_EXIT_INTERRUPT
ITTT NE ; in case service != SE_EXIT_INTERRUPT
LDRNE R4, =PrimaskValue
LDRNE R4, [R4]
MSRNE PRIMASK, R4
; call SE service (R0/R1/R2 pass through unchanged)
BLX SE_CallGateService
; disable interrupts
CPSID i
; reset current Active Stack Pointer to AppliActiveSpMode
; (AppliActiveSpMode == 0 -> MSP, otherwise PSP, via CONTROL.SPSEL)
; NOTE(review): replicated-byte masks 0xFDFDFDFD / 0x02020202 also touch
; reserved CONTROL bits beyond bit 1 — confirm this is intentional
MRS R4,CONTROL
LDR R6, =AppliActiveSpMode
LDR R6, [R6]
CMP R6, #0x00000000
ITE EQ
ANDEQ R4, R4, #0xFDFDFDFD
ORRNE R4, R4, #0x02020202
MSR CONTROL, R4
ISB
; restore Appli MSP (at this step, service != SE_EXIT_INTERRUPT)
LDR R4, =AppliMsp
LDR R4, [R4]
MSR MSP, R4
; restore Appli Active SP
LDR R4, =AppliActiveSp
LDR R4, [R4]
MOV SP, R4
; restore saved register
POP {R4, R5, R6}
; restore LR
POP {LR}
; return
BX LR
; ******************************************
; Function Name : SE_ExitHandler_Service
; Description : restore SP & non scratch registers & exit Handler Mode
; input : None
; internal : R0, R1
; return : None: PC set with EXC_RETURN value
; ******************************************
SE_ExitHandler_Service
; reset interrupt handling flag
MOV R0, #0x00000000
LDR R1, =IntHand
STR R0, [R1]
; Set Active SP to SeExcEntrySp
LDR R0, =SeExcEntrySp
LDR SP, [R0]
; R0 = SeExcReturn
LDR R0, =SeExcReturn
LDR R0, [R0]
; check if FPU non scratch register shall be restored
; EXC_RETURN bit[4] = 1 means no FP context was stacked -> skip FPU restore
TST R0, #0x10 ; test if bit[4] = 1
BNE RESTORE_CORE_REG_ONLY
; restore non-scratch FPU registers
SUB R1, SP, #96 ; R1 = SeExcEntrySp - 24 SE stack entries (24 SE stack entries = (S16 -> S31) + (R4 -> R11))
VLDR S16, [R1]
VLDR S17, [R1, #4]
VLDR S18, [R1, #8]
VLDR S19, [R1, #12]
VLDR S20, [R1, #16]
VLDR S21, [R1, #20]
VLDR S22, [R1, #24]
VLDR S23, [R1, #28]
VLDR S24, [R1, #32]
VLDR S25, [R1, #36]
VLDR S26, [R1, #40]
VLDR S27, [R1, #44]
VLDR S28, [R1, #48]
VLDR S29, [R1, #52]
VLDR S30, [R1, #56]
VLDR S31, [R1, #60]
RESTORE_CORE_REG_ONLY
; restore non-scratch core registers
SUB R1, SP, #32 ; R1 = SeExcEntrySp - 8 SE stack entries (8 SE stack entries = (R4 -> R11))
LDR R4, [R1]
LDR R5, [R1, #4]
LDR R6, [R1, #8]
LDR R7, [R1, #12]
LDR R8, [R1, #16]
LDR R9, [R1, #20]
LDR R10, [R1, #24]
LDR R11, [R1, #28]
; restore Primask
LDR R1, =PrimaskValue
LDR R1, [R1]
MSR PRIMASK, R1
; trigger the exception return (BX with EXC_RETURN in R0)
BX R0
; we shall not raise this point
; B NVIC_SystemReset
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 2,707
|
Middlewares/ST/STM32_Secure_Engine/Core/se_interface_exception_IAR.s
|
;/******************************************************************************
;* File Name : se_interface_exception.s
;* Author : MCD Application Team
;* Description : This file defines function to handle user interrupts
;* raised in Firewall: the wrapper runs the application's IT
;* handler outside the firewall, then re-enters the SE call gate.
;*******************************************************************************
;* @attention
;*
;* Copyright (c) 2020 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file in
;* the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;*
;*******************************************************************************
;
SECTION .SE_IF_Code_Entry:CODE:NOROOT(2)
EXPORT SE_UserHandlerWrapper
IMPORT DummyMemAccess
IMPORT SeCallGateStatusParam
IMPORT __ICFEDIT_SE_CallGate_region_ROM_start__
; ******************************************
; Function Name : SE_UserHandlerWrapper
; Description : call Appli IT Handler
; input : R0 : @ Appli IT Handler
; : R1 : Primask value
; internal : R3
; output : R0 : SE_CallGate 1st input param: Service ID
; : R1 : SE_CallGate 2nd input param: return status var @
; : R2 : SE_CallGate 3rd input param: Primask
; return : None (control re-enters the SE call gate; never falls through)
; ******************************************
SE_UserHandlerWrapper
; restore Primask (interrupt masking state saved before leaving the firewall)
MSR PRIMASK, R1
; Specific B-L4S5I-IOT01A : force a SRAM data access outside FWALL protected SRAM to close correctly the FWALL
; See errata sheet: ES0393 - Rev 6 - October 2019
; DummyMemAccess variable used to force data access outside FWALL protected SRAM1
; and outside the 18 LSB range protected by FWALL.
LDR R2, =DummyMemAccess
LDR R2, [R2]
; call User IT Handler
; SE_UserHandlerWrapper shall be mapped at @ bit[4] = 1 in linker script file
; the purpose is to have LR bit [4] = 1 after executing the next instruction BLX R0
; So be careful not to modify code here that changes LR bit [4] after executing the next instruction BLX R0
BLX R0
; disable IT
CPSID i
; set input param for SE_CallGate
MOV R0, #0x00001000 ; SE_EXIT_INTERRUPT
LDR R1, =SeCallGateStatusParam
MOV R2, #0xFFFFFFFF ; invalid Primask
; re-enter in firewall via SE_CallGate
LDR R3, =__ICFEDIT_SE_CallGate_region_ROM_start__
ADD R3, R3, #1 ; set bit[0] (Thumb state) for the indirect BLX
BLX R3 ; LR shall be updated otherwise __IS_CALLER_SE_IF will fail in Se_CallGate
; we shall not raise this point
; B NVIC_SystemReset
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 5,087
|
Middlewares/ST/STM32_Secure_Engine/Core/se_stack_smuggler_it_mngt_GNU.s
|
/******************************************************************************
* File Name : se_stack_smuggler_it_mngt_GNU.s
* Author : MCD Application Team
* Description : Switch SP from SB to SE RAM region, with interrupt
* management support (SE_EXIT_INTERRUPT service handling).
*******************************************************************************
* @attention
*
* Copyright (c) 2020 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file in
* the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
*******************************************************************************
*/
.section .text
.global SE_SP_SMUGGLE
.global SE_ExitHandler_Service
.global AppliActiveSp
.global AppliMsp
.global SeMsp
.global SeExcEntrySp
.global __ICFEDIT_SE_region_RAM_stack_top__
.global SE_CallGateService
.global SE_EXIT_INTERRUPT
.global PrimaskValue
.global AppliActiveSpMode
.global IntHand
.global SeExcReturn
.syntax unified
.thumb
/*******************************************
* Function Name : SE_SP_SMUGGLE
* Description : save Appli SP, call SE service, restore Appli SP
* input : R0 : eID (0x00001000 = SE_EXIT_INTERRUPT)
* : R1 : peSE_Status
* : R2 : arguments
* internal : R4, R6 : working register to update AppliMsp, AppliActiveSp, PRIMASK, CONTROL
* : R5 : Appli Active SP
* return : R0 : status
*******************************************
*/
SE_SP_SMUGGLE:
// save LR
PUSH {LR}
// save working registers
PUSH {R4, R5, R6}
// save Appli MSP if SE_CallGate called from user appli (no interrupt)
CMP R0, #0x00001000 // compare to SE_EXIT_INTERRUPT
ITTT NE
MRSNE R5, MSP
LDRNE R4, =AppliMsp
STRNE R5, [R4]
// save Appli Active SP if SE_CallGate called from user appli (no interrupt)
CMP R0, #0x00001000 // compare to SE_EXIT_INTERRUPT
ITTT NE
MOVNE R5, SP
LDRNE R4, =AppliActiveSp
STRNE R5, [R4]
// set SP to initial Appli MSP in case of interrupt
CMP R0, #0x00001000 // compare to SE_EXIT_INTERRUPT
ITT EQ
LDREQ R4, =AppliMsp
LDREQ SP, [R4]
// set current Active Stack Pointer to MSP (SE manages only MSP)
// clear CONTROL.SPSEL (bit 1) so SP aliases MSP
MRS R4, CONTROL
AND R4, R4, #0xFFFFFFFD
MSR CONTROL, R4
ISB
// set Active SP to SE Stack
CMP R0, #0x00001000 // compare to SE_EXIT_INTERRUPT
ITTE EQ
LDREQ R4, =SeMsp // in case service = SE_EXIT_INTERRUPT
LDREQ SP, [R4] // in case service = SE_EXIT_INTERRUPT
LDRNE SP, =__ICFEDIT_SE_region_RAM_stack_top__ // in case service != SE_EXIT_INTERRUPT
// restore primask in case service != SE_EXIT_INTERRUPT
CMP R0, #0x00001000 // compare service to SE_EXIT_INTERRUPT
ITTT NE // in case service != SE_EXIT_INTERRUPT
LDRNE R4, =PrimaskValue
LDRNE R4, [R4]
MSRNE PRIMASK, R4
// call SE service (R0/R1/R2 pass through unchanged)
BLX SE_CallGateService
// disable interrupts
CPSID i
// reset current Active Stack Pointer to AppliActiveSpMode
// (AppliActiveSpMode == 0 -> MSP, otherwise PSP, via CONTROL.SPSEL)
// NOTE(review): replicated-byte masks 0xFDFDFDFD / 0x02020202 also touch
// reserved CONTROL bits beyond bit 1 — confirm this is intentional
MRS R4,CONTROL
LDR R6, =AppliActiveSpMode
LDR R6, [R6]
CMP R6, #0x00000000
ITE EQ
ANDEQ R4, R4, #0xFDFDFDFD
ORRNE R4, R4, #0x02020202
MSR CONTROL, R4
ISB
// restore Appli MSP (at this step, service != SE_EXIT_INTERRUPT)
LDR R4, =AppliMsp
LDR R4, [R4]
MSR MSP, R4
// restore Appli Active SP
LDR R4, =AppliActiveSp
LDR R4, [R4]
MOV SP, R4
// restore saved register
POP {R4, R5, R6}
// restore LR
POP {LR}
// return
BX LR
/******************************************
* Function Name : SE_ExitHandler_Service
* Description : restore SP & non scratch registers & exit Handler Mode
* input : None
* internal : R0, R1
* return : None: PC set with EXC_RETURN value
******************************************
*/
SE_ExitHandler_Service:
// reset interrupt handling flag
MOV R0, #0x00000000
LDR R1, =IntHand
STR R0, [R1]
// Set Active SP to SeExcEntrySp
LDR R0, =SeExcEntrySp
LDR SP, [R0]
// R0 = SeExcReturn
LDR R0, =SeExcReturn
LDR R0, [R0]
// check if FPU non scratch register shall be restored
// EXC_RETURN bit[4] = 1 means no FP context was stacked -> skip FPU restore
TST R0, #0x10 // test if bit[4] = 1
BNE RESTORE_CORE_REG_ONLY
// restore non-scratch FPU registers
SUB R1, SP, #96 // R1 = SeExcEntrySp - 24 SE stack entries (24 SE stack entries = (S16 -> S31) + (R4 -> R11))
VLDR S16, [R1]
VLDR S17, [R1, #4]
VLDR S18, [R1, #8]
VLDR S19, [R1, #12]
VLDR S20, [R1, #16]
VLDR S21, [R1, #20]
VLDR S22, [R1, #24]
VLDR S23, [R1, #28]
VLDR S24, [R1, #32]
VLDR S25, [R1, #36]
VLDR S26, [R1, #40]
VLDR S27, [R1, #44]
VLDR S28, [R1, #48]
VLDR S29, [R1, #52]
VLDR S30, [R1, #56]
VLDR S31, [R1, #60]
RESTORE_CORE_REG_ONLY:
// restore non-scratch core registers
SUB R1, SP, #32 // R1 = SeExcEntrySp - 8 SE stack entries (8 SE stack entries = (R4 -> R11))
LDR R4, [R1]
LDR R5, [R1, #4]
LDR R6, [R1, #8]
LDR R7, [R1, #12]
LDR R8, [R1, #16]
LDR R9, [R1, #20]
LDR R10, [R1, #24]
LDR R11, [R1, #28]
// restore Primask
LDR R1, =PrimaskValue
LDR R1, [R1]
MSR PRIMASK, R1
// trigger the exception return (BX with EXC_RETURN in R0)
BX R0
// we shall not raise this point
// B NVIC_SystemReset
.end
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 4,991
|
Middlewares/ST/STM32_Secure_Engine/Core/se_stack_smuggler_it_mngt_ARM.s
|
;/******************************************************************************
;* File Name : se_stack_smuggler_ARM.s
;* Author : MCD Application Team
;* Description : Switch SP from SB to SE RAM region.
;*******************************************************************************
;* @attention
;*
;* Copyright (c) 2020 STMicroelectronics.
;* All rights reserved.
;*
;* This software is licensed under terms that can be found in the LICENSE file in
;* the root directory of this software component.
;* If no LICENSE file comes with this software, it is provided AS-IS.
;*
;*******************************************************************************
;
AREA |.text|, CODE
EXPORT SE_SP_SMUGGLE
EXPORT SE_ExitHandler_Service
PRESERVE8 {TRUE}
IMPORT AppliActiveSp
IMPORT AppliMsp
IMPORT SeMsp
IMPORT SeExcEntrySp
IMPORT |Image$$SE_region_RAM$$Base|
IMPORT SE_CallGateService
IMPORT PrimaskValue
IMPORT AppliActiveSpMode
IMPORT IntHand
IMPORT SeExcReturn
; ******************************************
; Function Name : SE_SP_SMUGGLE
; Description : save Appli SP, call SE service, restore Appli SP
; input : R0 : eID
; : R1 : peSE_Status
; : R2 : arguments
; internal : R4, R6 : working register to update AppliMsp, AppliActiveSp, PRIMASK, CONTROL
; : R5 : Appli Active SP
; return : R0 : status
; ******************************************
SE_SP_SMUGGLE
; save LR (caller return address, restored just before BX LR below)
PUSH {LR}
; save working registers
PUSH {R4, R5, R6}
; save Appli MSP if SE_CallGate called from user appli (no interrupt)
CMP R0, #0x00001000 ; compare to SE_EXIT_INTERRUPT
ITTT NE
MRSNE R5, MSP
LDRNE R4, =AppliMsp
STRNE R5, [R4]
; save Appli Active SP if SE_CallGate called from user appli (no interrupt)
CMP R0, #0x00001000 ; compare to SE_EXIT_INTERRUPT
ITTT NE
MOVNE R5, SP
LDRNE R4, =AppliActiveSp
STRNE R5, [R4]
; set SP to initial Appli MSP in case of interrupt
CMP R0, #0x00001000 ; compare to SE_EXIT_INTERRUPT
ITT EQ
LDREQ R4, =AppliMsp
LDREQ SP, [R4]
; set current Active Stack Pointer to MSP (SE manages only MSP)
MRS R4, CONTROL
AND R4, R4, #0xFFFFFFFD ; clear CONTROL.SPSEL (bit 1)
MSR CONTROL, R4
ISB ; flush pipeline after CONTROL change
; set Active SP to SE Stack
CMP R0, #0x00001000 ; compare to SE_EXIT_INTERRUPT
ITTE EQ
LDREQ R4, =SeMsp ; in case service = SE_EXIT_INTERRUPT
LDREQ SP, [R4]
LDRNE SP, =|Image$$SE_region_RAM$$Base| ; in case service != SE_EXIT_INTERRUPT
; restore primask in case service != SE_EXIT_INTERRUPT
CMP R0, #0x00001000 ; compare service to SE_EXIT_INTERRUPT
ITTT NE ; in case service != SE_EXIT_INTERRUPT
LDRNE R4, =PrimaskValue
LDRNE R4, [R4]
MSRNE PRIMASK, R4
; call SE service (R0/R1/R2 still hold eID/peSE_Status/arguments)
LDR R4, =SE_CallGateService
BLX R4
; disable interrupts
CPSID i
; reset current Active Stack Pointer to AppliActiveSpMode
; NOTE(review): only bit 1 (SPSEL) of CONTROL is meaningful here; the
; byte-replicated masks below differ in style from the #0xFFFFFFFD used
; above - presumably equivalent as the other touched bits are not
; implemented in CONTROL, but consider normalizing to #0xFFFFFFFD / #0x02.
MRS R4,CONTROL
LDR R6, =AppliActiveSpMode
LDR R6, [R6]
CMP R6, #0x00000000
ITE EQ
ANDEQ R4, R4, #0xFDFDFDFD ; mode 0: clear SPSEL -> thread mode uses MSP
ORRNE R4, R4, #0x02020202 ; mode !=0: set SPSEL -> thread mode uses PSP
MSR CONTROL, R4
ISB
; restore Appli MSP (at this step, service != SE_EXIT_INTERRUPT)
LDR R4, =AppliMsp
LDR R4, [R4]
MSR MSP, R4
; restore Appli Active SP
LDR R4, =AppliActiveSp
LDR R4, [R4]
MOV SP, R4
; restore saved register
POP {R4, R5, R6}
; restore LR
POP {LR}
; return
BX LR
; ******************************************
; Function Name : SE_ExitHandler_Service
; Description : restore SP & non scratch registers & exit Handler Mode
;               (reloads the SP captured at SE exception entry, restores
;               the callee-saved core/FPU registers stacked below it,
;               restores PRIMASK, then branches to the saved EXC_RETURN)
; input : None
; internal : R0, R1
; return : None: PC set with EXC_RETURN value
; ******************************************
SE_ExitHandler_Service
; reset interrupt handling flag (IntHand = 0: no longer servicing an interrupt)
MOV R0, #0x00000000
LDR R1, =IntHand
STR R0, [R1]
; Set Active SP to SeExcEntrySp (SP value captured when the exception entered the SE)
LDR R0, =SeExcEntrySp
LDR SP, [R0]
; R0 = SeExcReturn (EXC_RETURN value saved at exception entry)
LDR R0, =SeExcReturn
LDR R0, [R0]
; check if FPU non scratch register shall be restored
TST R0, #0x10 ; EXC_RETURN bit 4 set => standard frame, no FP context was stacked
BNE RESTORE_CORE_REG_ONLY
; restore non-scratch FPU registers
SUB R1, SP, #96 ; R1 = SeExcEntrySp - 24 SE stack entries (24 SE stack entries = (S16 -> S31) + (R4 -> R11))
VLDR S16, [R1]
VLDR S17, [R1, #4]
VLDR S18, [R1, #8]
VLDR S19, [R1, #12]
VLDR S20, [R1, #16]
VLDR S21, [R1, #20]
VLDR S22, [R1, #24]
VLDR S23, [R1, #28]
VLDR S24, [R1, #32]
VLDR S25, [R1, #36]
VLDR S26, [R1, #40]
VLDR S27, [R1, #44]
VLDR S28, [R1, #48]
VLDR S29, [R1, #52]
VLDR S30, [R1, #56]
VLDR S31, [R1, #60]
RESTORE_CORE_REG_ONLY
; restore non-scratch core registers
SUB R1, SP, #32 ; R1 = SeExcEntrySp - 8 SE stack entries (8 SE stack entries = (R4 -> R11))
LDR R4, [R1]
LDR R5, [R1, #4]
LDR R6, [R1, #8]
LDR R7, [R1, #12]
LDR R8, [R1, #16]
LDR R9, [R1, #20]
LDR R10, [R1, #24]
LDR R11, [R1, #28]
; restore Primask (interrupt mask state saved on SE entry)
LDR R1, =PrimaskValue
LDR R1, [R1]
MSR PRIMASK, R1
; trigger the exception return
BX R0
; we shall not raise this point
; B NVIC_SystemReset
ALIGN 4 ; now aligned on 4-byte boundary
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 2,814
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM3/portasm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#include <FreeRTOSConfig.h>
RSEG CODE:CODE(2)
thumb
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
PUBLIC vPortStartFirstTask
/*-----------------------------------------------------------*/
/* PendSV handler: Cortex-M3 context switch.
   On entry the hardware has stacked r0-r3, r12, lr, pc, xPSR on the
   running task's process stack (PSP).  This handler stacks r4-r11,
   calls vTaskSwitchContext() with interrupts masked via BASEPRI,
   then unstacks r4-r11 for the newly selected task and returns
   through the EXC_RETURN value in r14. */
xPortPendSVHandler:
	mrs r0, psp                     /* r0 = current task's process stack pointer. */
	isb
	ldr	r3, =pxCurrentTCB			/* Get the location of the current TCB. */
	ldr	r2, [r3]
	stmdb r0!, {r4-r11}				/* Save the remaining registers. */
	str r0, [r2]					/* Save the new top of stack into the first member of the TCB. */
	stmdb sp!, {r3, r14}            /* Preserve &pxCurrentTCB and EXC_RETURN across the C call. */
	mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
	msr basepri, r0                 /* Mask interrupts up to the syscall priority. */
	dsb
	isb
	bl vTaskSwitchContext           /* Updates pxCurrentTCB to the next task. */
	mov r0, #0
	msr basepri, r0                 /* Unmask all interrupt priorities. */
	ldmia sp!, {r3, r14}
	ldr r1, [r3]
	ldr r0, [r1]					/* The first item in pxCurrentTCB is the task top of stack. */
	ldmia r0!, {r4-r11}				/* Pop the registers. */
	msr psp, r0                     /* PSP now points at the new task's exception frame. */
	isb
	bx r14                          /* Exception return: hardware restores the rest of the frame. */
/*-----------------------------------------------------------*/
/* SVC handler: starts the first task.  Restores r4-r11 from the first
   task's prepared stack frame, switches the thread stack to PSP, and
   forces an EXC_RETURN to thread mode using the process stack. */
vPortSVCHandler:
	/* Get the location of the current TCB. */
	ldr	r3, =pxCurrentTCB
	ldr r1, [r3]
	ldr r0, [r1]                    /* First TCB member = task top of stack. */
	/* Pop the core registers. */
	ldmia r0!, {r4-r11}
	msr psp, r0                     /* Point PSP at the hardware-restorable frame. */
	isb
	mov r0, #0
	msr basepri, r0                 /* Ensure no interrupt priorities are masked. */
	orr r14, r14, #13               /* EXC_RETURN = 0xFFFFFFFD: thread mode, PSP. */
	bx r14
/*-----------------------------------------------------------*/
/* Starts the scheduler: resets MSP to the initial stack from the vector
   table, enables interrupts, and raises SVC 0 so vPortSVCHandler runs
   the first task.  Never returns. */
vPortStartFirstTask
	/* Use the NVIC offset register to locate the stack. */
	ldr r0, =0xE000ED08             /* VTOR: address of the vector table. */
	ldr r0, [r0]
	ldr r0, [r0]                    /* First vector-table entry = initial MSP. */
	/* Set the msp back to the start of the stack. */
	msr msp, r0
	/* Call SVC to start the first task, ensuring interrupts are enabled. */
	cpsie i
	cpsie f
	dsb
	isb
	svc 0
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 3,722
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM4F/portasm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#include <FreeRTOSConfig.h>
RSEG CODE:CODE(2)
thumb
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
PUBLIC vPortStartFirstTask
PUBLIC vPortEnableVFP
/*-----------------------------------------------------------*/
/* PendSV handler: Cortex-M4F context switch.
   Like the CM3 version but additionally stacks/unstacks s16-s31 when
   the task has an active FPU context (EXC_RETURN bit 4 clear), and
   saves r14 (EXC_RETURN) as part of the task context so the FPU state
   decision survives the switch. */
xPortPendSVHandler:
	mrs r0, psp                     /* r0 = current task's process stack pointer. */
	isb
	/* Get the location of the current TCB. */
	ldr	r3, =pxCurrentTCB
	ldr	r2, [r3]
	/* Is the task using the FPU context?  If so, push high vfp registers. */
	tst r14, #0x10                  /* EXC_RETURN bit 4 clear => FP context active. */
	it eq
	vstmdbeq r0!, {s16-s31}
	/* Save the core registers. */
	stmdb r0!, {r4-r11, r14}
	/* Save the new top of stack into the first member of the TCB. */
	str r0, [r2]
	stmdb sp!, {r0, r3}             /* Preserve scratch across the C call. */
	mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
	msr basepri, r0                 /* Mask interrupts up to the syscall priority. */
	dsb
	isb
	bl vTaskSwitchContext           /* Updates pxCurrentTCB to the next task. */
	mov r0, #0
	msr basepri, r0                 /* Unmask all interrupt priorities. */
	ldmia sp!, {r0, r3}
	/* The first item in pxCurrentTCB is the task top of stack. */
	ldr r1, [r3]
	ldr r0, [r1]
	/* Pop the core registers. */
	ldmia r0!, {r4-r11, r14}
	/* Is the task using the FPU context?  If so, pop the high vfp registers
	too. */
	tst r14, #0x10
	it eq
	vldmiaeq r0!, {s16-s31}
	msr psp, r0                     /* PSP now points at the new task's exception frame. */
	isb
	#ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata */
		#if WORKAROUND_PMU_CM001 == 1
			push { r14 }
			pop { pc }
		#endif
	#endif
	bx r14                          /* Exception return. */
/*-----------------------------------------------------------*/
/* SVC handler: starts the first task by restoring its r4-r11 and
   EXC_RETURN from the prepared stack frame, then returning to thread
   mode on the process stack. */
vPortSVCHandler:
	/* Get the location of the current TCB. */
	ldr	r3, =pxCurrentTCB
	ldr r1, [r3]
	ldr r0, [r1]                    /* First TCB member = task top of stack. */
	/* Pop the core registers. */
	ldmia r0!, {r4-r11, r14}        /* r14 = EXC_RETURN stored in the task frame. */
	msr psp, r0
	isb
	mov r0, #0
	msr basepri, r0                 /* Ensure no interrupt priorities are masked. */
	bx r14
/*-----------------------------------------------------------*/
/* Starts the scheduler: resets MSP from the vector table, clears
   CONTROL (in particular the FPCA bit), enables interrupts, and raises
   SVC 0 so vPortSVCHandler runs the first task.  Never returns. */
vPortStartFirstTask
	/* Use the NVIC offset register to locate the stack. */
	ldr r0, =0xE000ED08             /* VTOR: address of the vector table. */
	ldr r0, [r0]
	ldr r0, [r0]                    /* First vector-table entry = initial MSP. */
	/* Set the msp back to the start of the stack. */
	msr msp, r0
	/* Clear the bit that indicates the FPU is in use in case the FPU was used
	before the scheduler was started - which would otherwise result in the
	unnecessary leaving of space in the SVC stack for lazy saving of FPU
	registers. */
	mov r0, #0
	msr control, r0
	/* Call SVC to start the first task. */
	cpsie i
	cpsie f
	dsb
	isb
	svc 0
/*-----------------------------------------------------------*/
/* Enables the FPU by granting full access to coprocessors CP10/CP11
   in the CPACR.  Must be called before any FP instruction executes. */
vPortEnableVFP:
	/* The FPU enable bits are in the CPACR. */
	ldr.w r0, =0xE000ED88           /* CPACR address. */
	ldr	r1, [r0]
	/* Enable CP10 and CP11 coprocessors, then save back. */
	orr	r1, r1, #( 0xf << 20 )      /* Full access for CP10 and CP11. */
	str r1, [r0]
	bx	r14
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 5,588
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM4_MPU/portasm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#include <FreeRTOSConfig.h>
RSEG CODE:CODE(2)
thumb
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
PUBLIC vPortStartFirstTask
PUBLIC vPortEnableVFP
PUBLIC vPortRestoreContextOfFirstTask
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
/*-----------------------------------------------------------*/
/* PendSV handler: Cortex-M4 MPU-port context switch.
   In addition to the FPU-aware CM4F flow, this port saves/restores the
   CONTROL register (privilege level) as part of each task's context and
   reprograms 4 MPU regions from the incoming task's TCB before the
   registers are restored. */
xPortPendSVHandler:
	mrs r0, psp                     /* r0 = current task's process stack pointer. */
	isb
	/* Get the location of the current TCB. */
	ldr	r3, =pxCurrentTCB
	ldr	r2, [r3]
	/* Is the task using the FPU context?  If so, push high vfp registers. */
	tst r14, #0x10                  /* EXC_RETURN bit 4 clear => FP context active. */
	it eq
	vstmdbeq r0!, {s16-s31}
	/* Save the core registers. */
	mrs r1, control                 /* Task's privilege state is part of its context. */
	stmdb r0!, {r1, r4-r11, r14}
	/* Save the new top of stack into the first member of the TCB. */
	str r0, [r2]
	stmdb sp!, {r0, r3}
	mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
	msr basepri, r0                 /* Mask interrupts up to the syscall priority. */
	dsb
	isb
	bl vTaskSwitchContext           /* Updates pxCurrentTCB to the next task. */
	mov r0, #0
	msr basepri, r0
	ldmia sp!, {r0, r3}
	/* The first item in pxCurrentTCB is the task top of stack. */
	ldr r1, [r3]
	ldr r0, [r1]
	/* Move onto the second item in the TCB... */
	add r1, r1, #4
	/* Region Base Address register. */
	ldr r2, =0xe000ed9c             /* MPU_RBAR; alias registers allow 4 regions in one burst. */
	/* Read 4 sets of MPU registers. */
	ldmia r1!, {r4-r11}
	/* Write 4 sets of MPU registers. */
	stmia r2!, {r4-r11}
	/* Pop the registers that are not automatically saved on exception entry. */
	ldmia r0!, {r3-r11, r14}
	msr control, r3                 /* Restore the incoming task's privilege level. */
	/* Is the task using the FPU context?  If so, pop the high vfp registers
	too. */
	tst r14, #0x10
	it eq
	vldmiaeq r0!, {s16-s31}
	msr psp, r0
	isb
	bx r14                          /* Exception return. */
/*-----------------------------------------------------------*/
/* SVC handler entry stub: determines which stack holds the exception
   frame (MSP vs PSP from EXC_RETURN bit 2) and tail-calls the C
   handler with that frame pointer in r0. */
vPortSVCHandler:
	#ifndef USE_PROCESS_STACK	/* Code should not be required if a main() is using the process stack. */
		tst lr, #4                  /* EXC_RETURN bit 2: 0 = frame on MSP, 1 = frame on PSP. */
		ite eq
		mrseq r0, msp
		mrsne r0, psp
	#else
		mrs r0, psp
	#endif
		b vPortSVCHandler_C         /* Tail-call: C handler performs the actual return. */
/*-----------------------------------------------------------*/
/* Starts the scheduler: resets MSP from the vector table, clears
   CONTROL (including FPCA), enables interrupts, and raises SVC 0 to
   launch the first task.  Never returns. */
vPortStartFirstTask:
	/* Use the NVIC offset register to locate the stack. */
	ldr r0, =0xE000ED08             /* VTOR: address of the vector table. */
	ldr r0, [r0]
	ldr r0, [r0]                    /* First vector-table entry = initial MSP. */
	/* Set the msp back to the start of the stack. */
	msr msp, r0
	/* Clear the bit that indicates the FPU is in use in case the FPU was used
	before the scheduler was started - which would otherwise result in the
	unnecessary leaving of space in the SVC stack for lazy saving of FPU
	registers. */
	mov r0, #0
	msr control, r0
	/* Call SVC to start the first task. */
	cpsie i
	cpsie f
	dsb
	isb
	svc 0
/*-----------------------------------------------------------*/
/* Restores the context of the first task directly (called from the SVC
   path): resets MSP, programs the task's 4 MPU regions from its TCB,
   restores CONTROL and r4-r11/EXC_RETURN from the prepared frame, and
   returns to thread mode on the process stack. */
vPortRestoreContextOfFirstTask:
	/* Use the NVIC offset register to locate the stack. */
	ldr r0, =0xE000ED08             /* VTOR: address of the vector table. */
	ldr r0, [r0]
	ldr r0, [r0]
	/* Set the msp back to the start of the stack. */
	msr msp, r0
	/* Restore the context. */
	ldr	r3, =pxCurrentTCB
	ldr r1, [r3]
	/* The first item in the TCB is the task top of stack. */
	ldr r0, [r1]
	/* Move onto the second item in the TCB... */
	add r1, r1, #4
	/* Region Base Address register. */
	ldr r2, =0xe000ed9c             /* MPU_RBAR alias registers. */
	/* Read 4 sets of MPU registers. */
	ldmia r1!, {r4-r11}
	/* Write 4 sets of MPU registers. */
	stmia r2!, {r4-r11}
	/* Pop the registers that are not automatically saved on exception entry. */
	ldmia r0!, {r3-r11, r14}
	msr control, r3                 /* Restore the task's privilege level. */
	/* Restore the task stack pointer. */
	msr psp, r0
	mov r0, #0
	msr basepri, r0                 /* Ensure no interrupt priorities are masked. */
	bx r14                          /* Exception return into the task. */
/*-----------------------------------------------------------*/
/* Enables the FPU by granting full access to coprocessors CP10/CP11
   in the CPACR.  Must be called before any FP instruction executes. */
vPortEnableVFP:
	/* The FPU enable bits are in the CPACR. */
	ldr.w r0, =0xE000ED88           /* CPACR address. */
	ldr	r1, [r0]
	/* Enable CP10 and CP11 coprocessors, then save back. */
	orr	r1, r1, #( 0xf << 20 )      /* Full access for CP10 and CP11. */
	str r1, [r0]
	bx	r14
/*-----------------------------------------------------------*/
/* BaseType_t xIsPrivileged( void ):
   returns 1 if the processor is running privileged (CONTROL.nPRIV == 0),
   0 otherwise. */
xIsPrivileged:
	mrs r0, control					/* r0 = CONTROL. */
	tst r0, #1						/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
	ite ne
	movne r0, #0					/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
	moveq r0, #1					/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
	bx lr							/* Return. */
/*-----------------------------------------------------------*/
/* void vResetPrivilege( void ):
   drops to unprivileged thread mode by setting CONTROL.nPRIV (bit 0). */
vResetPrivilege:
	mrs r0, control					/* r0 = CONTROL. */
	orr r0, r0, #1					/* r0 = r0 | 1. */
	msr control, r0					/* CONTROL = r0. */
	bx lr							/* Return to the caller. */
/*-----------------------------------------------------------*/
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 4,157
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM0/portasm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#include <FreeRTOSConfig.h>
RSEG CODE:CODE(2)
thumb
EXTERN vPortYieldFromISR
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
PUBLIC vSetMSP
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
PUBLIC vPortStartFirstTask
PUBLIC ulSetInterruptMaskFromISR
PUBLIC vClearInterruptMaskFromISR
/*-----------------------------------------------------------*/
/* void vSetMSP( uint32_t value ): writes r0 into the main stack pointer. */
vSetMSP
	msr msp, r0
	bx lr
/*-----------------------------------------------------------*/
/* PendSV handler: Cortex-M0 context switch.
   Thumb-1 has no stmdb/ldmia for high registers, so r8-r11 are moved
   through r4-r7 in two passes.  The saved frame layout below PSP is:
   r4-r7 (low), then r8-r11 (high), 32 bytes total. */
xPortPendSVHandler:
	mrs r0, psp                     /* r0 = current task's process stack pointer. */
	ldr	r3, =pxCurrentTCB	/* Get the location of the current TCB. */
	ldr	r2, [r3]
	subs r0, r0, #32			/* Make space for the remaining low registers. */
	str r0, [r2]				/* Save the new top of stack. */
	stmia r0!, {r4-r7}			/* Store the low registers that are not saved automatically. */
	mov r4, r8					/* Store the high registers. */
	mov r5, r9
	mov r6, r10
	mov r7, r11
	stmia r0!, {r4-r7}
	push {r3, r14}              /* Preserve &pxCurrentTCB and EXC_RETURN across the C call. */
	cpsid i                     /* CM0 has no BASEPRI: mask all interrupts via PRIMASK. */
	bl vTaskSwitchContext       /* Updates pxCurrentTCB to the next task. */
	cpsie i
	pop {r2, r3}				/* lr goes in r3. r2 now holds tcb pointer. */
	ldr r1, [r2]
	ldr r0, [r1]				/* The first item in pxCurrentTCB is the task top of stack. */
	adds r0, r0, #16			/* Move to the high registers. */
	ldmia r0!, {r4-r7}			/* Pop the high registers. */
	mov r8, r4
	mov r9, r5
	mov r10, r6
	mov r11, r7
	msr psp, r0					/* Remember the new top of stack for the task. */
	subs r0, r0, #32			/* Go back for the low registers that are not automatically restored. */
	ldmia r0!, {r4-r7}			/* Pop low registers.  */
	bx r3                       /* Exception return via saved EXC_RETURN. */
/*-----------------------------------------------------------*/
/* Retained stub; the trailing ';' on the label starts an (empty) IAR
   comment, so the line still defines only the label. */
vPortSVCHandler;
	/* This function is no longer used, but retained for backward
	compatibility. */
	bx lr
/*-----------------------------------------------------------*/
/* Starts the scheduler on Cortex-M0: switches thread mode to PSP,
   manually unstacks the first task's prepared exception frame (no SVC
   handler is used on this port), then jumps straight to the task. */
vPortStartFirstTask
	/* The MSP stack is not reset as, unlike on M3/4 parts, there is no vector
	table offset register that can be used to locate the initial stack value.
	Not all M0 parts have the application vector table at address 0. */
	ldr	r3, =pxCurrentTCB	/* Obtain location of pxCurrentTCB. */
	ldr r1, [r3]
	ldr r0, [r1]		 	/* The first item in pxCurrentTCB is the task top of stack. */
	adds r0, #32			/* Discard everything up to r0. */
	msr psp, r0				/* This is now the new top of stack to use in the task. */
	movs r0, #2				/* Switch to the psp stack. */
	msr CONTROL, r0         /* CONTROL.SPSEL = 1. */
	isb
	pop {r0-r5}				/* Pop the registers that are saved automatically. */
	mov lr, r5				/* lr is now in r5. */
	pop {r3}				/* The return address is now in r3. */
	pop {r2}				/* Pop and discard the XPSR. */
	cpsie i					/* The first task has its context and interrupts can be enabled. */
	bx r3					/* Jump to the user defined task code. */
/*-----------------------------------------------------------*/
/* uint32_t ulSetInterruptMaskFromISR( void ):
   returns the current PRIMASK, then disables interrupts.  The returned
   value is later passed to vClearInterruptMaskFromISR(). */
ulSetInterruptMaskFromISR
	mrs r0, PRIMASK
	cpsid i
	bx lr
/*-----------------------------------------------------------*/
/* void vClearInterruptMaskFromISR( uint32_t mask ):
   restores PRIMASK from r0 (the value returned by
   ulSetInterruptMaskFromISR). */
vClearInterruptMaskFromISR
	msr PRIMASK, r0
	bx lr
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 3,740
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM7/r0p1/portasm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#include <FreeRTOSConfig.h>
RSEG CODE:CODE(2)
thumb
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
PUBLIC vPortStartFirstTask
PUBLIC vPortEnableVFP
/*-----------------------------------------------------------*/
/* PendSV handler: Cortex-M7 r0p1 context switch.
   Identical to the CM4F flow except that the BASEPRI write is bracketed
   by cpsid/cpsie — the r0p1 core revision requires interrupts to be
   globally disabled while BASEPRI is raised (silicon erratum
   workaround). */
xPortPendSVHandler:
	mrs r0, psp                     /* r0 = current task's process stack pointer. */
	isb
	/* Get the location of the current TCB. */
	ldr	r3, =pxCurrentTCB
	ldr	r2, [r3]
	/* Is the task using the FPU context?  If so, push high vfp registers. */
	tst r14, #0x10                  /* EXC_RETURN bit 4 clear => FP context active. */
	it eq
	vstmdbeq r0!, {s16-s31}
	/* Save the core registers. */
	stmdb r0!, {r4-r11, r14}
	/* Save the new top of stack into the first member of the TCB. */
	str r0, [r2]
	stmdb sp!, {r0, r3}
	mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
	cpsid i                         /* r0p1 workaround: globally mask while writing BASEPRI. */
	msr basepri, r0
	dsb
	isb
	cpsie i
	bl vTaskSwitchContext           /* Updates pxCurrentTCB to the next task. */
	mov r0, #0
	msr basepri, r0
	ldmia sp!, {r0, r3}
	/* The first item in pxCurrentTCB is the task top of stack. */
	ldr r1, [r3]
	ldr r0, [r1]
	/* Pop the core registers. */
	ldmia r0!, {r4-r11, r14}
	/* Is the task using the FPU context?  If so, pop the high vfp registers
	too. */
	tst r14, #0x10
	it eq
	vldmiaeq r0!, {s16-s31}
	msr psp, r0
	isb
	#ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata */
		#if WORKAROUND_PMU_CM001 == 1
			push { r14 }
			pop { pc }
		#endif
	#endif
	bx r14                          /* Exception return. */
/*-----------------------------------------------------------*/
/* SVC handler: starts the first task by restoring its r4-r11 and
   EXC_RETURN from the prepared stack frame, then returning to thread
   mode on the process stack. */
vPortSVCHandler:
	/* Get the location of the current TCB. */
	ldr	r3, =pxCurrentTCB
	ldr r1, [r3]
	ldr r0, [r1]                    /* First TCB member = task top of stack. */
	/* Pop the core registers. */
	ldmia r0!, {r4-r11, r14}        /* r14 = EXC_RETURN stored in the task frame. */
	msr psp, r0
	isb
	mov r0, #0
	msr basepri, r0                 /* Ensure no interrupt priorities are masked. */
	bx r14
/*-----------------------------------------------------------*/
/* Starts the scheduler: resets MSP from the vector table, clears
   CONTROL (including FPCA), enables interrupts, and raises SVC 0 to
   launch the first task.  Never returns. */
vPortStartFirstTask
	/* Use the NVIC offset register to locate the stack. */
	ldr r0, =0xE000ED08             /* VTOR: address of the vector table. */
	ldr r0, [r0]
	ldr r0, [r0]                    /* First vector-table entry = initial MSP. */
	/* Set the msp back to the start of the stack. */
	msr msp, r0
	/* Clear the bit that indicates the FPU is in use in case the FPU was used
	before the scheduler was started - which would otherwise result in the
	unnecessary leaving of space in the SVC stack for lazy saving of FPU
	registers. */
	mov r0, #0
	msr control, r0
	/* Call SVC to start the first task. */
	cpsie i
	cpsie f
	dsb
	isb
	svc 0
/*-----------------------------------------------------------*/
/* Enables the FPU by granting full access to coprocessors CP10/CP11
   in the CPACR.  Must be called before any FP instruction executes. */
vPortEnableVFP:
	/* The FPU enable bits are in the CPACR. */
	ldr.w r0, =0xE000ED88           /* CPACR address. */
	ldr	r1, [r0]
	/* Enable CP10 and CP11 coprocessors, then save back. */
	orr	r1, r1, #( 0xf << 20 )      /* Full access for CP10 and CP11. */
	str r1, [r0]
	bx	r14
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 15,449
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM33/non_secure/portasm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
PUBLIC vPortAllocateSecureContext
PUBLIC vRestoreContextOfFirstTask
PUBLIC vRaisePrivilege
PUBLIC vStartFirstTask
PUBLIC ulSetInterruptMaskFromISR
PUBLIC vClearInterruptMaskFromISR
PUBLIC PendSV_Handler
PUBLIC SVC_Handler
PUBLIC vPortFreeSecureContext
/*-----------------------------------------------------------*/
/*---------------- Unprivileged Functions -------------------*/
/*-----------------------------------------------------------*/
SECTION .text:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
/* BaseType_t xIsPrivileged( void ):
   returns 1 if the processor is running privileged (CONTROL.nPRIV == 0),
   0 otherwise. */
xIsPrivileged:
	mrs r0, control					/* r0 = CONTROL. */
	tst r0, #1						/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
	ite ne
	movne r0, #0					/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
	moveq r0, #1					/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
	bx lr							/* Return. */
/*-----------------------------------------------------------*/
/* void vResetPrivilege( void ):
   drops to unprivileged thread mode by setting CONTROL.nPRIV (bit 0). */
vResetPrivilege:
	mrs r0, control					/* r0 = CONTROL. */
	orr r0, r0, #1					/* r0 = r0 | 1. */
	msr control, r0					/* CONTROL = r0. */
	bx lr							/* Return to the caller. */
/*-----------------------------------------------------------*/
/* void vPortAllocateSecureContext( uint32_t size ):
   requests allocation of a secure-side context via supervisor call 0;
   the actual work happens in the SVC handler. */
vPortAllocateSecureContext:
	svc 0							/* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
	bx lr							/* Return. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
SECTION privileged_functions:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
/* Restores the context of the first task (ARMv8-M non-secure side):
   optionally reprograms the MPU (MAIR0 + 4 RBAR/RLAR pairs) from the
   TCB, then unstacks xSecureContext, PSPLIM, (CONTROL,) and EXC_RETURN
   from the task's prepared frame and branches to EXC_RETURN. */
vRestoreContextOfFirstTask:
	ldr  r2, =pxCurrentTCB			/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
	ldr  r3, [r2]					/* Read pxCurrentTCB. */
	ldr  r0, [r3]					/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
#if ( configENABLE_MPU == 1 )
	dmb								/* Complete outstanding transfers before disabling MPU. */
	ldr r2, =0xe000ed94				/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
	ldr r4, [r2]					/* Read the value of MPU_CTRL. */
	bic r4, r4, #1					/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
	str r4, [r2]					/* Disable MPU. */
	adds r3, #4						/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
	ldr  r4, [r3]					/* r4 = *r3 i.e. r4 = MAIR0. */
	ldr  r2, =0xe000edc0			/* r2 = 0xe000edc0 [Location of MAIR0]. */
	str  r4, [r2]					/* Program MAIR0. */
	ldr  r2, =0xe000ed98			/* r2 = 0xe000ed98 [Location of RNR]. */
	movs r4, #4						/* r4 = 4. */
	str  r4, [r2]					/* Program RNR = 4. */
	adds r3, #4						/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
	ldr  r2, =0xe000ed9c			/* r2 = 0xe000ed9c [Location of RBAR]. */
	ldmia r3!, {r4-r11}				/* Read 4 set of RBAR/RLAR registers from TCB. */
	stmia r2!, {r4-r11}				/* Write 4 set of RBAR/RLAR registers using alias registers. */
	ldr r2, =0xe000ed94				/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
	ldr r4, [r2]					/* Read the value of MPU_CTRL. */
	orr r4, r4, #1					/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
	str r4, [r2]					/* Enable MPU. */
	dsb								/* Force memory writes before continuing. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
	ldm  r0!, {r1-r4}				/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
	ldr  r5, =xSecureContext
	str  r1, [r5]					/* Set xSecureContext to this task's value for the same. */
	msr  psplim, r2					/* Set this task's PSPLIM value. */
	msr  control, r3				/* Set this task's CONTROL value. */
	adds r0, #32					/* Discard everything up to r0. */
	msr  psp, r0					/* This is now the new top of stack to use in the task. */
	isb
	bx   r4							/* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
	ldm  r0!, {r1-r3}				/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
	ldr  r4, =xSecureContext
	str  r1, [r4]					/* Set xSecureContext to this task's value for the same. */
	msr  psplim, r2					/* Set this task's PSPLIM value. */
	movs r1, #2						/* r1 = 2. */
	msr  CONTROL, r1				/* Switch to use PSP in the thread mode. */
	adds r0, #32					/* Discard everything up to r0. */
	msr  psp, r0					/* This is now the new top of stack to use in the task. */
	isb
	bx   r3							/* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/* void vRaisePrivilege( void ):
   switches to privileged mode by clearing CONTROL.nPRIV (bit 0).
   Only effective when already executing privileged (e.g. in a handler). */
vRaisePrivilege:
	mrs  r0, control				/* Read the CONTROL register. */
	bic r0, r0, #1					/* Clear the bit 0. */
	msr  control, r0				/* Write back the new CONTROL value. */
	bx lr							/* Return to the caller. */
/*-----------------------------------------------------------*/
/* Starts the scheduler: resets MSP from the vector table, enables
   interrupts, and raises SVC 2 (portSVC_START_SCHEDULER) so the SVC
   handler launches the first task.  Never returns. */
vStartFirstTask:
	ldr r0, =0xe000ed08				/* Use the NVIC offset register to locate the stack. */
	ldr r0, [r0]					/* Read the VTOR register which gives the address of vector table. */
	ldr r0, [r0]					/* The first entry in vector table is stack pointer. */
	msr msp, r0						/* Set the MSP back to the start of the stack. */
	cpsie i							/* Globally enable interrupts. */
	cpsie f
	dsb
	isb
	svc 2							/* System call to start the first task. portSVC_START_SCHEDULER = 2. */
/*-----------------------------------------------------------*/
/* uint32_t ulSetInterruptMaskFromISR( void ):
   returns the current PRIMASK, then disables interrupts.  The returned
   value is later passed to vClearInterruptMaskFromISR(). */
ulSetInterruptMaskFromISR:
	mrs r0, PRIMASK
	cpsid i
	bx lr
/*-----------------------------------------------------------*/
/* void vClearInterruptMaskFromISR( uint32_t mask ):
   restores PRIMASK from r0 (the value returned by
   ulSetInterruptMaskFromISR). */
vClearInterruptMaskFromISR:
	msr PRIMASK, r0
	bx lr
/*-----------------------------------------------------------*/
/*
 * PendSV_Handler: the context switch (ARMv8-M with TrustZone).  Saves the
 * running task's context - the secure side first via
 * SecureContext_SaveContext when xSecureContext != 0, then r4-r11 and,
 * when configENABLE_FPU, s16-s31 on the non-secure path - stores
 * xSecureContext, PSPLIM, (CONTROL when configENABLE_MPU) and EXC_RETURN
 * on the task stack, calls vTaskSwitchContext, then restores the context
 * of the task now in pxCurrentTCB.  With configENABLE_MPU the MPU is
 * disabled, reprogrammed with MAIR0 + 4 RBAR/RLAR pairs from the TCB,
 * and re-enabled before the restore.
 */
PendSV_Handler:
	mrs r1, psp							/* Read PSP in r1. */
	ldr r2, =xSecureContext				/* Read the location of xSecureContext i.e. &( xSecureContext ). */
	ldr r0, [r2]						/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
	cbz r0, save_ns_context				/* No secure context to save. */
	push {r0-r2, r14}					/* Preserve state across the secure-side call. */
	bl SecureContext_SaveContext		/* Save the task's secure-side context. */
	pop {r0-r3}							/* LR is now in r3. */
	mov lr, r3							/* LR = r3. */
	lsls r2, r3, #25					/* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
	bpl save_ns_context					/* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
	ldr r3, =pxCurrentTCB				/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
	ldr r2, [r3]						/* Read pxCurrentTCB. */
#if ( configENABLE_MPU == 1 )
	subs r1, r1, #16					/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
	str r1, [r2]						/* Save the new top of stack in TCB. */
	mrs r2, psplim						/* r2 = PSPLIM. */
	mrs r3, control						/* r3 = CONTROL. */
	mov r4, lr							/* r4 = LR/EXC_RETURN. */
	stmia r1!, {r0, r2-r4}				/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
	subs r1, r1, #12					/* Make space for xSecureContext, PSPLIM and LR on the stack. */
	str r1, [r2]						/* Save the new top of stack in TCB. */
	mrs r2, psplim						/* r2 = PSPLIM. */
	mov r3, lr							/* r3 = LR/EXC_RETURN. */
	stmia r1!, {r0, r2-r3}				/* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
	b select_next_task
save_ns_context:
	ldr r3, =pxCurrentTCB				/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
	ldr r2, [r3]						/* Read pxCurrentTCB. */
#if ( configENABLE_FPU == 1 )
	tst lr, #0x10						/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
	it eq
	vstmdbeq r1!, {s16-s31}				/* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
	subs r1, r1, #48					/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
	str r1, [r2]						/* Save the new top of stack in TCB. */
	adds r1, r1, #16					/* r1 = r1 + 16. */
	stm r1, {r4-r11}					/* Store the registers that are not saved automatically. */
	mrs r2, psplim						/* r2 = PSPLIM. */
	mrs r3, control						/* r3 = CONTROL. */
	mov r4, lr							/* r4 = LR/EXC_RETURN. */
	subs r1, r1, #16					/* r1 = r1 - 16. */
	stm r1, {r0, r2-r4}					/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
	subs r1, r1, #44					/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
	str r1, [r2]						/* Save the new top of stack in TCB. */
	adds r1, r1, #12					/* r1 = r1 + 12. */
	stm r1, {r4-r11}					/* Store the registers that are not saved automatically. */
	mrs r2, psplim						/* r2 = PSPLIM. */
	mov r3, lr							/* r3 = LR/EXC_RETURN. */
	subs r1, r1, #12					/* r1 = r1 - 12. */
	stmia r1!, {r0, r2-r3}				/* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
select_next_task:
	cpsid i								/* Interrupts masked while the scheduler state is updated. */
	bl vTaskSwitchContext				/* Pick the next task; updates pxCurrentTCB. */
	cpsie i
	ldr r2, =pxCurrentTCB				/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
	ldr r3, [r2]						/* Read pxCurrentTCB. */
	ldr r1, [r3]						/* The first item in pxCurrentTCB is the task top of stack. r1 now points to the top of stack. */
#if ( configENABLE_MPU == 1 )
	dmb									/* Complete outstanding transfers before disabling MPU. */
	ldr r2, =0xe000ed94					/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
	ldr r4, [r2]						/* Read the value of MPU_CTRL. */
	bic r4, r4, #1						/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
	str r4, [r2]						/* Disable MPU. */
	adds r3, #4							/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
	ldr r4, [r3]						/* r4 = *r3 i.e. r4 = MAIR0. */
	ldr r2, =0xe000edc0					/* r2 = 0xe000edc0 [Location of MAIR0]. */
	str r4, [r2]						/* Program MAIR0. */
	ldr r2, =0xe000ed98					/* r2 = 0xe000ed98 [Location of RNR]. */
	movs r4, #4							/* r4 = 4. */
	str r4, [r2]						/* Program RNR = 4. */
	adds r3, #4							/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
	ldr r2, =0xe000ed9c					/* r2 = 0xe000ed9c [Location of RBAR]. */
	ldmia r3!, {r4-r11}					/* Read 4 sets of RBAR/RLAR registers from TCB. */
	stmia r2!, {r4-r11}					/* Write 4 set of RBAR/RLAR registers using alias registers. */
	ldr r2, =0xe000ed94					/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
	ldr r4, [r2]						/* Read the value of MPU_CTRL. */
	orr r4, r4, #1						/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
	str r4, [r2]						/* Enable MPU. */
	dsb									/* Force memory writes before continuing. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
	ldmia r1!, {r0, r2-r4}				/* Read from stack - r0 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = LR. */
	msr psplim, r2						/* Restore the PSPLIM register value for the task. */
	msr control, r3						/* Restore the CONTROL register value for the task. */
	mov lr, r4							/* LR = r4. */
	ldr r2, =xSecureContext				/* Read the location of xSecureContext i.e. &( xSecureContext ). */
	str r0, [r2]						/* Restore the task's xSecureContext. */
	cbz r0, restore_ns_context			/* If there is no secure context for the task, restore the non-secure context. */
	push {r1,r4}						/* Preserve stack pointer and EXC_RETURN across the secure call. */
	bl SecureContext_LoadContext		/* Restore the secure context. */
	pop {r1,r4}
	mov lr, r4							/* LR = r4. */
	lsls r2, r4, #25					/* r2 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
	bpl restore_ns_context				/* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
	msr psp, r1							/* Remember the new top of stack for the task. */
	bx lr
#else /* configENABLE_MPU */
	ldmia r1!, {r0, r2-r3}				/* Read from stack - r0 = xSecureContext, r2 = PSPLIM and r3 = LR. */
	msr psplim, r2						/* Restore the PSPLIM register value for the task. */
	mov lr, r3							/* LR = r3. */
	ldr r2, =xSecureContext				/* Read the location of xSecureContext i.e. &( xSecureContext ). */
	str r0, [r2]						/* Restore the task's xSecureContext. */
	cbz r0, restore_ns_context			/* If there is no secure context for the task, restore the non-secure context. */
	push {r1,r3}						/* Preserve stack pointer and EXC_RETURN across the secure call. */
	bl SecureContext_LoadContext		/* Restore the secure context. */
	pop {r1,r3}
	mov lr, r3							/* LR = r3. */
	lsls r2, r3, #25					/* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
	bpl restore_ns_context				/* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
	msr psp, r1							/* Remember the new top of stack for the task. */
	bx lr
#endif /* configENABLE_MPU */
restore_ns_context:
	ldmia r1!, {r4-r11}					/* Restore the registers that are not automatically restored. */
#if ( configENABLE_FPU == 1 )
	tst lr, #0x10						/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
	it eq
	vldmiaeq r1!, {s16-s31}				/* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
	msr psp, r1							/* Remember the new top of stack for the task. */
	bx lr
/*-----------------------------------------------------------*/
/*
 * SVC_Handler: bit[2] of EXC_RETURN tells which stack holds the
 * exception frame; place that frame's address in r0 and tail-branch to
 * the C handler, which returns directly to the SVC caller.
 */
SVC_Handler:
	tst lr, #4							/* Bit[2] of EXC_RETURN: 0 = frame on MSP, 1 = frame on PSP. */
	ite eq
	mrseq r0, msp						/* r0 = exception frame on the main stack. */
	mrsne r0, psp						/* r0 = exception frame on the process stack. */
	b vPortSVCHandler_C					/* Tail call into the C handler. */
/*-----------------------------------------------------------*/
/*
 * vPortFreeSecureContext: given a TCB pointer in r0, read the task's
 * saved xSecureContext (first word on its saved stack) and raise SVC #1
 * to free it on the secure side when it is non-NULL.  Clobbers: r0, r1.
 */
vPortFreeSecureContext:
	/* r0 = uint32_t *pulTCB. */
	ldr r1, [r0]						/* The first item in the TCB is the top of the stack. */
	ldr r0, [r1]						/* The first item on the stack is the task's xSecureContext. */
	cmp r0, #0							/* Raise svc if task's xSecureContext is not NULL. */
	it ne
	svcne 1								/* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
	bx lr								/* Return. */
/*-----------------------------------------------------------*/
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 3,224
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM33/secure/secure_context_port_asm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
SECTION .text:CODE:NOROOT(2)
THUMB
PUBLIC SecureContext_LoadContextAsm
PUBLIC SecureContext_SaveContextAsm
/*-----------------------------------------------------------*/
/*
 * SecureContext_LoadContextAsm: restore the secure-side stack pointers
 * (and CONTROL when configENABLE_MPU) for the context handle passed in
 * r0.  A no-op when called from Thread Mode (IPSR == 0).
 * Fix: renamed the misspelled local label "load_ctx_therad_mode" to
 * "load_ctx_thread_mode" (label is private to this routine).
 */
SecureContext_LoadContextAsm:
	/* xSecureContextHandle value is in r0. */
	mrs r1, ipsr						/* r1 = IPSR. */
	cbz r1, load_ctx_thread_mode		/* Do nothing if the processor is running in the Thread Mode. */
	ldmia r0!, {r1, r2}					/* r1 = xSecureContextHandle->pucCurrentStackPointer, r2 = xSecureContextHandle->pucStackLimit. */
#if ( configENABLE_MPU == 1 )
	ldmia r1!, {r3}						/* Read CONTROL register value from task's stack. r3 = CONTROL. */
	msr control, r3						/* CONTROL = r3. */
#endif /* configENABLE_MPU */
	msr psplim, r2						/* PSPLIM = r2. */
	msr psp, r1							/* PSP = r1. */
load_ctx_thread_mode:
	bx lr
/*-----------------------------------------------------------*/
/*
 * SecureContext_SaveContextAsm: save the secure-side stack state into
 * the context handle passed in r0, then set PSP/PSPLIM to 0 so Thread
 * Mode has no secure stack until the next context is loaded.  A no-op
 * when called from Thread Mode (IPSR == 0).
 * Fixes: renamed the misspelled local label "save_ctx_therad_mode" to
 * "save_ctx_thread_mode"; corrected comment typos (deferred, previous).
 */
SecureContext_SaveContextAsm:
	/* xSecureContextHandle value is in r0. */
	mrs r1, ipsr						/* r1 = IPSR. */
	cbz r1, save_ctx_thread_mode		/* Do nothing if the processor is running in the Thread Mode. */
	mrs r1, psp							/* r1 = PSP. */
#if ( configENABLE_FPU == 1 )
	vstmdb r1!, {s0}					/* Trigger the deferred stacking of FPU registers. */
	vldmia r1!, {s0}					/* Nullify the effect of the previous statement. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
	mrs r2, control						/* r2 = CONTROL. */
	stmdb r1!, {r2}						/* Store CONTROL value on the stack. */
#endif /* configENABLE_MPU */
	str r1, [r0]						/* Save the top of stack in context. xSecureContextHandle->pucCurrentStackPointer = r1. */
	movs r1, #0							/* r1 = securecontextNO_STACK. */
	msr psplim, r1						/* PSPLIM = securecontextNO_STACK. */
	msr psp, r1							/* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */
save_ctx_thread_mode:
	bx lr
/*-----------------------------------------------------------*/
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 5,612
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM7_MPU/r0p1/portasm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#include <FreeRTOSConfig.h>
RSEG CODE:CODE(2)
thumb
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
PUBLIC vPortStartFirstTask
PUBLIC vPortEnableVFP
PUBLIC vPortRestoreContextOfFirstTask
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
/*-----------------------------------------------------------*/
/*
 * xPortPendSVHandler: Cortex-M7 MPU-port context switch.  Saves the
 * current task's CONTROL, r4-r11, EXC_RETURN (plus s16-s31 when the
 * task used the FPU), selects the next task with interrupts masked via
 * BASEPRI, reprograms 4 MPU region register sets from the new TCB and
 * restores the new task's context.
 * NOTE(review): the cpsid i / dsb / isb / cpsie i bracket around the
 * BASEPRI write matches the Cortex-M7 r0p1 erratum 837070 workaround
 * (this port directory targets r0p1) - do not remove it.
 */
xPortPendSVHandler:
	mrs r0, psp
	isb
	/* Get the location of the current TCB. */
	ldr r3, =pxCurrentTCB
	ldr r2, [r3]
	/* Is the task using the FPU context? If so, push high vfp registers. */
	tst r14, #0x10
	it eq
	vstmdbeq r0!, {s16-s31}
	/* Save the core registers. */
	mrs r1, control
	stmdb r0!, {r1, r4-r11, r14}
	/* Save the new top of stack into the first member of the TCB. */
	str r0, [r2]
	stmdb sp!, {r0, r3}					/* Preserve r0/r3 across the C call below. */
	mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
	cpsid i								/* Erratum 837070 workaround: mask around BASEPRI write. */
	msr basepri, r0
	dsb
	isb
	cpsie i
	bl vTaskSwitchContext
	mov r0, #0
	msr basepri, r0						/* Unmask: BASEPRI = 0. */
	ldmia sp!, {r0, r3}
	/* The first item in pxCurrentTCB is the task top of stack. */
	ldr r1, [r3]
	ldr r0, [r1]
	/* Move onto the second item in the TCB... */
	add r1, r1, #4
	/* Region Base Address register. */
	ldr r2, =0xe000ed9c
	/* Read 4 sets of MPU registers. */
	ldmia r1!, {r4-r11}
	/* Write 4 sets of MPU registers. */
	stmia r2!, {r4-r11}
	/* Pop the registers that are not automatically saved on exception entry. */
	ldmia r0!, {r3-r11, r14}
	msr control, r3
	/* Is the task using the FPU context? If so, pop the high vfp registers
	too. */
	tst r14, #0x10
	it eq
	vldmiaeq r0!, {s16-s31}
	msr psp, r0
	isb
	bx r14
/*-----------------------------------------------------------*/
/*
 * vPortSVCHandler: find the SVC exception frame - on MSP or PSP per
 * bit[2] of EXC_RETURN, or always on PSP when USE_PROCESS_STACK is
 * defined - and tail-branch to the C handler with the frame in r0.
 */
vPortSVCHandler:
	#ifndef USE_PROCESS_STACK	/* Code should not be required if a main() is using the process stack. */
		tst lr, #4
		ite eq
		mrseq r0, msp
		mrsne r0, psp
	#else
		mrs r0, psp
	#endif
		b vPortSVCHandler_C
/*-----------------------------------------------------------*/
/*
 * vPortStartFirstTask: reset MSP from the vector table's initial-stack
 * entry, clear CONTROL to discard any pre-scheduler lazy-FPU state,
 * enable interrupts and raise SVC #0 to start the first task.
 * Does not return.
 */
vPortStartFirstTask:
	/* Use the NVIC offset register to locate the stack. */
	ldr r0, =0xE000ED08
	ldr r0, [r0]
	ldr r0, [r0]
	/* Set the msp back to the start of the stack. */
	msr msp, r0
	/* Clear the bit that indicates the FPU is in use in case the FPU was used
	before the scheduler was started - which would otherwise result in the
	unnecessary leaving of space in the SVC stack for lazy saving of FPU
	registers. */
	mov r0, #0
	msr control, r0
	/* Call SVC to start the first task. */
	cpsie i
	cpsie f
	dsb
	isb
	svc 0
/*-----------------------------------------------------------*/
/*
 * vPortRestoreContextOfFirstTask: launch the first task from the SVC
 * handler.  Resets MSP, programs 4 MPU region register sets from the
 * TCB, restores CONTROL/r4-r11/EXC_RETURN from the task stack, sets PSP
 * and exits through EXC_RETURN in r14.
 */
vPortRestoreContextOfFirstTask:
	/* Use the NVIC offset register to locate the stack. */
	ldr r0, =0xE000ED08
	ldr r0, [r0]
	ldr r0, [r0]
	/* Set the msp back to the start of the stack. */
	msr msp, r0
	/* Restore the context. */
	ldr r3, =pxCurrentTCB
	ldr r1, [r3]
	/* The first item in the TCB is the task top of stack. */
	ldr r0, [r1]
	/* Move onto the second item in the TCB... */
	add r1, r1, #4
	/* Region Base Address register. */
	ldr r2, =0xe000ed9c
	/* Read 4 sets of MPU registers. */
	ldmia r1!, {r4-r11}
	/* Write 4 sets of MPU registers. */
	stmia r2!, {r4-r11}
	/* Pop the registers that are not automatically saved on exception entry. */
	ldmia r0!, {r3-r11, r14}
	msr control, r3
	/* Restore the task stack pointer. */
	msr psp, r0
	mov r0, #0
	msr basepri, r0						/* Ensure no interrupts are masked when the task starts. */
	bx r14
/*-----------------------------------------------------------*/
/*
 * vPortEnableVFP: grant full access to coprocessors CP10/CP11 (the FPU)
 * by setting the corresponding CPACR bits.  Clobbers: r0, r1.
 */
vPortEnableVFP:
	/* The FPU enable bits are in the CPACR. */
	ldr.w r0, =0xE000ED88
	ldr r1, [r0]
	/* Enable CP10 and CP11 coprocessors, then save back. */
	orr r1, r1, #( 0xf << 20 )
	str r1, [r0]
	bx r14
/*-----------------------------------------------------------*/
/*
 * BaseType_t xIsPrivileged( void )
 * Returns 1 in r0 when the processor runs privileged (CONTROL bit 0
 * clear), otherwise 0.
 */
xIsPrivileged:
	mrs r0, control						/* r0 = CONTROL. */
	tst r0, #1							/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
	ite ne
	movne r0, #0						/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
	moveq r0, #1						/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
	bx lr								/* Return. */
/*-----------------------------------------------------------*/
/*
 * vResetPrivilege: set CONTROL bit 0 so thread-mode code drops to
 * unprivileged execution.  Clobbers: r0.
 */
vResetPrivilege:
	mrs r0, control						/* r0 = CONTROL. */
	orr r0, r0, #1						/* r0 = r0 | 1. */
	msr control, r0						/* CONTROL = r0. */
	bx lr								/* Return to the caller. */
/*-----------------------------------------------------------*/
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 17,951
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM23/non_secure/portasm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
PUBLIC vPortAllocateSecureContext
PUBLIC vRestoreContextOfFirstTask
PUBLIC vRaisePrivilege
PUBLIC vStartFirstTask
PUBLIC ulSetInterruptMaskFromISR
PUBLIC vClearInterruptMaskFromISR
PUBLIC PendSV_Handler
PUBLIC SVC_Handler
PUBLIC vPortFreeSecureContext
#if ( configENABLE_FPU == 1 )
#error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
/*-----------------------------------------------------------*/
/*---------------- Unprivileged Functions -------------------*/
/*-----------------------------------------------------------*/
SECTION .text:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
/*
 * BaseType_t xIsPrivileged( void )
 * Cortex-M23 (Baseline - no IT blocks) variant: branch on CONTROL bit 0
 * and return 1 in r0 when privileged, 0 otherwise.  Clobbers: r0, r1.
 */
xIsPrivileged:
	mrs r0, control						/* r0 = CONTROL. */
	movs r1, #1							/* r1 = 1. */
	tst r0, r1							/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
	beq running_privileged				/* If the result of previous AND operation was 0, branch. */
	movs r0, #0							/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
	bx lr								/* Return. */
running_privileged:
	movs r0, #1							/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
	bx lr								/* Return. */
/*-----------------------------------------------------------*/
/*
 * vResetPrivilege: set CONTROL bit 0 so thread-mode code drops to
 * unprivileged execution.  Clobbers: r0, r1.
 */
vResetPrivilege:
	mrs r0, control						/* r0 = CONTROL. */
	movs r1, #1							/* r1 = 1. */
	orrs r0, r1							/* r0 = r0 | r1. */
	msr control, r0						/* CONTROL = r0. */
	bx lr								/* Return to the caller. */
/*-----------------------------------------------------------*/
/*
 * vPortAllocateSecureContext: request allocation of a secure-side
 * context for the calling task by raising SVC #0.
 */
vPortAllocateSecureContext:
	svc 0								/* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
	bx lr								/* Return. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
SECTION privileged_functions:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
/*
 * vRestoreContextOfFirstTask (Cortex-M23): launch the first task.  When
 * configENABLE_MPU, programs MAIR0 and four RBAR/RLAR pairs from the TCB
 * one region at a time via RNR (Baseline has no register aliases), then
 * restores xSecureContext, PSPLIM, (CONTROL or a fixed CONTROL of 2) and
 * exits through the task's EXC_RETURN.
 */
vRestoreContextOfFirstTask:
	ldr r2, =pxCurrentTCB				/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
	ldr r3, [r2]						/* Read pxCurrentTCB. */
	ldr r0, [r3]						/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
#if ( configENABLE_MPU == 1 )
	dmb									/* Complete outstanding transfers before disabling MPU. */
	ldr r2, =0xe000ed94					/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
	ldr r4, [r2]						/* Read the value of MPU_CTRL. */
	movs r5, #1							/* r5 = 1. */
	bics r4, r5							/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
	str r4, [r2]						/* Disable MPU. */
	adds r3, #4							/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
	ldr r4, [r3]						/* r4 = *r3 i.e. r4 = MAIR0. */
	ldr r2, =0xe000edc0					/* r2 = 0xe000edc0 [Location of MAIR0]. */
	str r4, [r2]						/* Program MAIR0. */
	ldr r2, =0xe000ed98					/* r2 = 0xe000ed98 [Location of RNR]. */
	adds r3, #4							/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
	movs r5, #4							/* r5 = 4. */
	str r5, [r2]						/* Program RNR = 4. */
	ldmia r3!, {r6,r7}					/* Read first set of RBAR/RLAR from TCB. */
	ldr r4, =0xe000ed9c					/* r4 = 0xe000ed9c [Location of RBAR]. */
	stmia r4!, {r6,r7}					/* Write first set of RBAR/RLAR registers. */
	movs r5, #5							/* r5 = 5. */
	str r5, [r2]						/* Program RNR = 5. */
	ldmia r3!, {r6,r7}					/* Read second set of RBAR/RLAR from TCB. */
	ldr r4, =0xe000ed9c					/* r4 = 0xe000ed9c [Location of RBAR]. */
	stmia r4!, {r6,r7}					/* Write second set of RBAR/RLAR registers. */
	movs r5, #6							/* r5 = 6. */
	str r5, [r2]						/* Program RNR = 6. */
	ldmia r3!, {r6,r7}					/* Read third set of RBAR/RLAR from TCB. */
	ldr r4, =0xe000ed9c					/* r4 = 0xe000ed9c [Location of RBAR]. */
	stmia r4!, {r6,r7}					/* Write third set of RBAR/RLAR registers. */
	movs r5, #7							/* r5 = 7. */
	str r5, [r2]						/* Program RNR = 7. */
	ldmia r3!, {r6,r7}					/* Read fourth set of RBAR/RLAR from TCB. */
	ldr r4, =0xe000ed9c					/* r4 = 0xe000ed9c [Location of RBAR]. */
	stmia r4!, {r6,r7}					/* Write fourth set of RBAR/RLAR registers. */
	ldr r2, =0xe000ed94					/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
	ldr r4, [r2]						/* Read the value of MPU_CTRL. */
	movs r5, #1							/* r5 = 1. */
	orrs r4, r5							/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
	str r4, [r2]						/* Enable MPU. */
	dsb									/* Force memory writes before continuing. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
	ldm r0!, {r1-r4}					/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
	ldr r5, =xSecureContext
	str r1, [r5]						/* Set xSecureContext to this task's value for the same. */
	msr psplim, r2						/* Set this task's PSPLIM value. */
	msr control, r3						/* Set this task's CONTROL value. */
	adds r0, #32						/* Discard everything up to r0. */
	msr psp, r0							/* This is now the new top of stack to use in the task. */
	isb
	bx r4								/* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
	ldm r0!, {r1-r3}					/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
	ldr r4, =xSecureContext
	str r1, [r4]						/* Set xSecureContext to this task's value for the same. */
	msr psplim, r2						/* Set this task's PSPLIM value. */
	movs r1, #2							/* r1 = 2. */
	msr CONTROL, r1						/* Switch to use PSP in the thread mode. */
	adds r0, #32						/* Discard everything up to r0. */
	msr psp, r0							/* This is now the new top of stack to use in the task. */
	isb
	bx r3								/* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/*
 * vRaisePrivilege: clear CONTROL bit 0 so thread-mode code executes
 * privileged.  Only effective when already privileged.  Clobbers: r0, r1.
 */
vRaisePrivilege:
	mrs r0, control						/* Read the CONTROL register. */
	movs r1, #1							/* r1 = 1. */
	bics r0, r1							/* Clear the bit 0. */
	msr control, r0						/* Write back the new CONTROL value. */
	bx lr								/* Return to the caller. */
/*-----------------------------------------------------------*/
/*
 * vStartFirstTask: reset the MSP from the initial-stack entry of the
 * vector table, enable interrupts, then raise SVC #2 so the SVC handler
 * launches the first task.  Does not return.
 */
vStartFirstTask:
	ldr r0, =0xe000ed08					/* Use the NVIC offset register to locate the stack. */
	ldr r0, [r0]						/* Read the VTOR register which gives the address of vector table. */
	ldr r0, [r0]						/* The first entry in vector table is stack pointer. */
	msr msp, r0							/* Set the MSP back to the start of the stack. */
	cpsie i								/* Globally enable interrupts. */
	dsb									/* Complete outstanding memory accesses. */
	isb									/* Flush the pipeline before raising the SVC. */
	svc 2								/* System call to start the first task. portSVC_START_SCHEDULER = 2. */
/*-----------------------------------------------------------*/
/*
 * ulSetInterruptMaskFromISR: return the current PRIMASK in r0, then
 * disable interrupts.  The read must precede cpsid so the caller can
 * later restore the pre-mask value via vClearInterruptMaskFromISR.
 */
ulSetInterruptMaskFromISR:
	mrs r0, PRIMASK						/* r0 = previous PRIMASK (function result). */
	cpsid i								/* Disable interrupts. */
	bx lr
/*-----------------------------------------------------------*/
/*
 * vClearInterruptMaskFromISR: restore PRIMASK from r0 - the value
 * previously returned by ulSetInterruptMaskFromISR.
 */
vClearInterruptMaskFromISR:
	msr PRIMASK, r0						/* PRIMASK = r0. */
	bx lr
/*-----------------------------------------------------------*/
/*
 * PendSV_Handler: Cortex-M23 (ARMv8-M Baseline with TrustZone) context
 * switch.  Same structure as the CM33 variant, but without IT blocks and
 * with high registers r8-r11 moved through r4-r7 because Baseline stm/ldm
 * only handle low registers.  Saves the running task's context (secure
 * side first when xSecureContext != 0), calls vTaskSwitchContext, and
 * restores the context of the task now in pxCurrentTCB.  With
 * configENABLE_MPU the MPU is reprogrammed region-by-region via RNR from
 * MAIR0 + 4 RBAR/RLAR pairs held in the TCB.
 */
PendSV_Handler:
	mrs r1, psp							/* Read PSP in r1. */
	ldr r2, =xSecureContext				/* Read the location of xSecureContext i.e. &( xSecureContext ). */
	ldr r0, [r2]						/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
	cbz r0, save_ns_context				/* No secure context to save. */
	push {r0-r2, r14}					/* Preserve state across the secure-side call. */
	bl SecureContext_SaveContext		/* Save the task's secure-side context. */
	pop {r0-r3}							/* LR is now in r3. */
	mov lr, r3							/* LR = r3. */
	lsls r2, r3, #25					/* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
	bpl save_ns_context					/* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
	ldr r3, =pxCurrentTCB				/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
	ldr r2, [r3]						/* Read pxCurrentTCB. */
#if ( configENABLE_MPU == 1 )
	subs r1, r1, #16					/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
	str r1, [r2]						/* Save the new top of stack in TCB. */
	mrs r2, psplim						/* r2 = PSPLIM. */
	mrs r3, control						/* r3 = CONTROL. */
	mov r4, lr							/* r4 = LR/EXC_RETURN. */
	stmia r1!, {r0, r2-r4}				/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
	subs r1, r1, #12					/* Make space for xSecureContext, PSPLIM and LR on the stack. */
	str r1, [r2]						/* Save the new top of stack in TCB. */
	mrs r2, psplim						/* r2 = PSPLIM. */
	mov r3, lr							/* r3 = LR/EXC_RETURN. */
	stmia r1!, {r0, r2-r3}				/* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
	b select_next_task
save_ns_context:
	ldr r3, =pxCurrentTCB				/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
	ldr r2, [r3]						/* Read pxCurrentTCB. */
#if ( configENABLE_MPU == 1 )
	subs r1, r1, #48					/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
	str r1, [r2]						/* Save the new top of stack in TCB. */
	adds r1, r1, #16					/* r1 = r1 + 16. */
	stmia r1!, {r4-r7}					/* Store the low registers that are not saved automatically. */
	mov r4, r8							/* r4 = r8. */
	mov r5, r9							/* r5 = r9. */
	mov r6, r10							/* r6 = r10. */
	mov r7, r11							/* r7 = r11. */
	stmia r1!, {r4-r7}					/* Store the high registers that are not saved automatically. */
	mrs r2, psplim						/* r2 = PSPLIM. */
	mrs r3, control						/* r3 = CONTROL. */
	mov r4, lr							/* r4 = LR/EXC_RETURN. */
	subs r1, r1, #48					/* r1 = r1 - 48. */
	stmia r1!, {r0, r2-r4}				/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
	subs r1, r1, #44					/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
	str r1, [r2]						/* Save the new top of stack in TCB. */
	mrs r2, psplim						/* r2 = PSPLIM. */
	mov r3, lr							/* r3 = LR/EXC_RETURN. */
	stmia r1!, {r0, r2-r7}				/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
	mov r4, r8							/* r4 = r8. */
	mov r5, r9							/* r5 = r9. */
	mov r6, r10							/* r6 = r10. */
	mov r7, r11							/* r7 = r11. */
	stmia r1!, {r4-r7}					/* Store the high registers that are not saved automatically. */
#endif /* configENABLE_MPU */
select_next_task:
	cpsid i								/* Interrupts masked while the scheduler state is updated. */
	bl vTaskSwitchContext				/* Pick the next task; updates pxCurrentTCB. */
	cpsie i
	ldr r2, =pxCurrentTCB				/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
	ldr r3, [r2]						/* Read pxCurrentTCB. */
	ldr r1, [r3]						/* The first item in pxCurrentTCB is the task top of stack. r1 now points to the top of stack. */
#if ( configENABLE_MPU == 1 )
	dmb									/* Complete outstanding transfers before disabling MPU. */
	ldr r2, =0xe000ed94					/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
	ldr r4, [r2]						/* Read the value of MPU_CTRL. */
	movs r5, #1							/* r5 = 1. */
	bics r4, r5							/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */
	str r4, [r2]						/* Disable MPU. */
	adds r3, #4							/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
	ldr r4, [r3]						/* r4 = *r3 i.e. r4 = MAIR0. */
	ldr r2, =0xe000edc0					/* r2 = 0xe000edc0 [Location of MAIR0]. */
	str r4, [r2]						/* Program MAIR0. */
	ldr r2, =0xe000ed98					/* r2 = 0xe000ed98 [Location of RNR]. */
	adds r3, #4							/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
	movs r5, #4							/* r5 = 4. */
	str r5, [r2]						/* Program RNR = 4. */
	ldmia r3!, {r6,r7}					/* Read first set of RBAR/RLAR from TCB. */
	ldr r4, =0xe000ed9c					/* r4 = 0xe000ed9c [Location of RBAR]. */
	stmia r4!, {r6,r7}					/* Write first set of RBAR/RLAR registers. */
	movs r5, #5							/* r5 = 5. */
	str r5, [r2]						/* Program RNR = 5. */
	ldmia r3!, {r6,r7}					/* Read second set of RBAR/RLAR from TCB. */
	ldr r4, =0xe000ed9c					/* r4 = 0xe000ed9c [Location of RBAR]. */
	stmia r4!, {r6,r7}					/* Write second set of RBAR/RLAR registers. */
	movs r5, #6							/* r5 = 6. */
	str r5, [r2]						/* Program RNR = 6. */
	ldmia r3!, {r6,r7}					/* Read third set of RBAR/RLAR from TCB. */
	ldr r4, =0xe000ed9c					/* r4 = 0xe000ed9c [Location of RBAR]. */
	stmia r4!, {r6,r7}					/* Write third set of RBAR/RLAR registers. */
	movs r5, #7							/* r5 = 7. */
	str r5, [r2]						/* Program RNR = 7. */
	ldmia r3!, {r6,r7}					/* Read fourth set of RBAR/RLAR from TCB. */
	ldr r4, =0xe000ed9c					/* r4 = 0xe000ed9c [Location of RBAR]. */
	stmia r4!, {r6,r7}					/* Write fourth set of RBAR/RLAR registers. */
	ldr r2, =0xe000ed94					/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
	ldr r4, [r2]						/* Read the value of MPU_CTRL. */
	movs r5, #1							/* r5 = 1. */
	orrs r4, r5							/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */
	str r4, [r2]						/* Enable MPU. */
	dsb									/* Force memory writes before continuing. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
	ldmia r1!, {r0, r2-r4}				/* Read from stack - r0 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = LR. */
	msr psplim, r2						/* Restore the PSPLIM register value for the task. */
	msr control, r3						/* Restore the CONTROL register value for the task. */
	mov lr, r4							/* LR = r4. */
	ldr r2, =xSecureContext				/* Read the location of xSecureContext i.e. &( xSecureContext ). */
	str r0, [r2]						/* Restore the task's xSecureContext. */
	cbz r0, restore_ns_context			/* If there is no secure context for the task, restore the non-secure context. */
	push {r1,r4}						/* Preserve stack pointer and EXC_RETURN across the secure call. */
	bl SecureContext_LoadContext		/* Restore the secure context. */
	pop {r1,r4}
	mov lr, r4							/* LR = r4. */
	lsls r2, r4, #25					/* r2 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
	bpl restore_ns_context				/* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
	msr psp, r1							/* Remember the new top of stack for the task. */
	bx lr
#else /* configENABLE_MPU */
	ldmia r1!, {r0, r2-r3}				/* Read from stack - r0 = xSecureContext, r2 = PSPLIM and r3 = LR. */
	msr psplim, r2						/* Restore the PSPLIM register value for the task. */
	mov lr, r3							/* LR = r3. */
	ldr r2, =xSecureContext				/* Read the location of xSecureContext i.e. &( xSecureContext ). */
	str r0, [r2]						/* Restore the task's xSecureContext. */
	cbz r0, restore_ns_context			/* If there is no secure context for the task, restore the non-secure context. */
	push {r1,r3}						/* Preserve stack pointer and EXC_RETURN across the secure call. */
	bl SecureContext_LoadContext		/* Restore the secure context. */
	pop {r1,r3}
	mov lr, r3							/* LR = r3. */
	lsls r2, r3, #25					/* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
	bpl restore_ns_context				/* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
	msr psp, r1							/* Remember the new top of stack for the task. */
	bx lr
#endif /* configENABLE_MPU */
restore_ns_context:
	adds r1, r1, #16					/* Move to the high registers. */
	ldmia r1!, {r4-r7}					/* Restore the high registers that are not automatically restored. */
	mov r8, r4							/* r8 = r4. */
	mov r9, r5							/* r9 = r5. */
	mov r10, r6							/* r10 = r6. */
	mov r11, r7							/* r11 = r7. */
	msr psp, r1							/* Remember the new top of stack for the task. */
	subs r1, r1, #32					/* Go back to the low registers. */
	ldmia r1!, {r4-r7}					/* Restore the low registers that are not automatically restored. */
	bx lr
/*-----------------------------------------------------------*/
/*
 * SVC_Handler (Cortex-M23, no IT blocks): branch on bit[2] of EXC_RETURN
 * to find the stack holding the exception frame, place its address in r0
 * and tail-branch to the C handler.
 */
SVC_Handler:
	movs r0, #4							/* r0 = mask for EXC_RETURN bit[2]. */
	mov r1, lr							/* r1 = EXC_RETURN. */
	tst r0, r1							/* Bit[2] clear means the frame was stacked on MSP. */
	beq stacking_used_msp
	mrs r0, psp							/* Frame is on the process stack. */
	b vPortSVCHandler_C
stacking_used_msp:
	mrs r0, msp							/* Frame is on the main stack. */
	b vPortSVCHandler_C
/*-----------------------------------------------------------*/
/*
 * vPortFreeSecureContext: given a TCB pointer in r0, read the task's
 * saved xSecureContext (first word on its saved stack) and raise SVC #1
 * to free it on the secure side when non-NULL.  Clobbers: r0, r1.
 */
vPortFreeSecureContext:
	ldr r1, [r0]						/* The first item in the TCB is the top of the stack. */
	ldr r0, [r1]						/* The first item on the stack is the task's xSecureContext. */
	cmp r0, #0							/* Raise svc if task's xSecureContext is not NULL. */
	beq free_secure_context
	bx lr								/* There is no secure context (xSecureContext is NULL). */
free_secure_context:
	svc 1								/* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
	bx lr								/* Return. */
/*-----------------------------------------------------------*/
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 3,375
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM23/secure/secure_context_port_asm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
SECTION .text:CODE:NOROOT(2)
THUMB
PUBLIC SecureContext_LoadContextAsm
PUBLIC SecureContext_SaveContextAsm
#if ( configENABLE_FPU == 1 )
#error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
/*-----------------------------------------------------------*/
SecureContext_LoadContextAsm:
/* Load (restore) the secure-side context identified by the handle in r0.
 * r0 -> xSecureContextHandle: { pucCurrentStackPointer, pucStackLimit }.
 * Restores CONTROL from the saved stack (when configENABLE_MPU == 1),
 * then PSPLIM and PSP.  Does nothing when called from Thread mode
 * (IPSR == 0).  Clobbers: r1-r3.
 * Fix: local label renamed from misspelled 'load_ctx_therad_mode'. */
mrs r1, ipsr /* r1 = IPSR. */
cbz r1, load_ctx_thread_mode /* Do nothing if the processor is running in the Thread Mode. */
ldmia r0!, {r1, r2} /* r1 = xSecureContextHandle->pucCurrentStackPointer, r2 = xSecureContextHandle->pucStackLimit. */
#if ( configENABLE_MPU == 1 )
ldmia r1!, {r3} /* Read CONTROL register value from task's stack. r3 = CONTROL. */
msr control, r3 /* CONTROL = r3. */
#endif /* configENABLE_MPU */
msr psplim, r2 /* PSPLIM = r2. */
msr psp, r1 /* PSP = r1. */
load_ctx_thread_mode:
bx lr
/*-----------------------------------------------------------*/
SecureContext_SaveContextAsm:
/* Save the secure-side context into the handle passed in r0.
 * r0 -> xSecureContextHandle; the current PSP (and CONTROL when
 * configENABLE_MPU == 1) is recorded as pucCurrentStackPointer, then
 * PSP/PSPLIM are cleared so Thread mode has no secure stack until the
 * next context is loaded.  Does nothing when called from Thread mode
 * (IPSR == 0).  Clobbers: r1, r2.
 * Fix: local label renamed from misspelled 'save_ctx_therad_mode'. */
mrs r1, ipsr /* r1 = IPSR. */
cbz r1, save_ctx_thread_mode /* Do nothing if the processor is running in the Thread Mode. */
mrs r1, psp /* r1 = PSP. */
#if ( configENABLE_MPU == 1 )
mrs r2, control /* r2 = CONTROL. */
subs r1, r1, #4 /* Make space for the CONTROL value on the stack. */
str r1, [r0] /* Save the top of stack in context. xSecureContextHandle->pucCurrentStackPointer = r1. */
stmia r1!, {r2} /* Store CONTROL value on the stack. */
#else /* configENABLE_MPU */
str r1, [r0] /* Save the top of stack in context. xSecureContextHandle->pucCurrentStackPointer = r1. */
#endif /* configENABLE_MPU */
movs r1, #0 /* r1 = securecontextNO_STACK. */
msr psplim, r1 /* PSPLIM = securecontextNO_STACK. */
msr psp, r1 /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */
save_ctx_thread_mode:
bx lr
/*-----------------------------------------------------------*/
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 10,398
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
PUBLIC vRestoreContextOfFirstTask
PUBLIC vRaisePrivilege
PUBLIC vStartFirstTask
PUBLIC ulSetInterruptMaskFromISR
PUBLIC vClearInterruptMaskFromISR
PUBLIC PendSV_Handler
PUBLIC SVC_Handler
/*-----------------------------------------------------------*/
/*---------------- Unprivileged Functions -------------------*/
/*-----------------------------------------------------------*/
SECTION .text:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
/*
 * BaseType_t xIsPrivileged( void );
 * Returns 1 in r0 when the processor runs privileged (CONTROL[0] == 0),
 * 0 otherwise.  Clobbers: r0, flags.
 */
xIsPrivileged:
mrs r0, control /* r0 = CONTROL. */
tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
ite ne
movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
/*
 * void vResetPrivilege( void );
 * Sets CONTROL[0] so that Thread mode runs unprivileged.
 * Clobbers: r0.
 */
vResetPrivilege:
mrs r0, control /* r0 = CONTROL. */
orr r0, r0, #1 /* r0 = r0 | 1. */
msr control, r0 /* CONTROL = r0. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
SECTION privileged_functions:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
/*
 * void vRestoreContextOfFirstTask( void );
 * Starts the scheduler: when configENABLE_MPU == 1, reprograms the MPU
 * (MAIR0 plus four RBAR/RLAR pairs, regions 4-7) from the current TCB,
 * then pops PSPLIM, CONTROL (MPU build only) and EXC_RETURN from the
 * first task's stack, sets PSP and branches to EXC_RETURN so execution
 * resumes inside the task.  Never returns to the caller.
 */
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
#if ( configENABLE_MPU == 1 )
dmb /* Complete outstanding transfers before disabling MPU. */
ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
ldr r4, [r2] /* Read the value of MPU_CTRL. */
bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
str r4, [r2] /* Disable MPU. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r3, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
movs r3, #4 /* r3 = 4. */
str r3, [r2] /* Program RNR = 4. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
ldr r4, [r2] /* Read the value of MPU_CTRL. */
orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
str r4, [r2] /* Enable MPU. */
dsb /* Force memory writes before continuing. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
msr control, r2 /* Set this task's CONTROL value. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r3 /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r2 /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/*
 * void vRaisePrivilege( void );
 * Clears CONTROL[0] so Thread mode runs privileged.  Note that writes
 * to CONTROL are ignored from unprivileged code, so this only has an
 * effect when executed from privileged context.  Clobbers: r0.
 */
vRaisePrivilege:
mrs r0, control /* Read the CONTROL register. */
bic r0, r0, #1 /* Clear the bit 0. */
msr control, r0 /* Write back the new CONTROL value. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
/*
 * void vStartFirstTask( void );
 * Resets MSP to the initial stack pointer (first word of the vector
 * table, located through VTOR), enables interrupts and faults, then
 * raises SVC #2 (portSVC_START_SCHEDULER) to start the first task.
 * Does not return.
 */
vStartFirstTask:
ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
ldr r0, [r0] /* The first entry in vector table is stack pointer. */
msr msp, r0 /* Set the MSP back to the start of the stack. */
cpsie i /* Globally enable interrupts. */
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
/*-----------------------------------------------------------*/
/*
 * uint32_t ulSetInterruptMaskFromISR( void );
 * Returns the current PRIMASK value in r0, then disables interrupts.
 * Pair with vClearInterruptMaskFromISR to restore the previous mask.
 */
ulSetInterruptMaskFromISR:
mrs r0, PRIMASK
cpsid i
bx lr
/*-----------------------------------------------------------*/
/*
 * void vClearInterruptMaskFromISR( uint32_t ulMask );
 * Restores PRIMASK from r0 (the value previously returned by
 * ulSetInterruptMaskFromISR).
 */
vClearInterruptMaskFromISR:
msr PRIMASK, r0
bx lr
/*-----------------------------------------------------------*/
/*
 * PendSV_Handler - context switch (Cortex-M33, no TrustZone).
 * Saves the current task's non-automatically-stacked context on its
 * process stack (PSPLIM, CONTROL when configENABLE_MPU == 1, EXC_RETURN,
 * r4-r11, and s16-s31 when the FPU was in use), stores the new top of
 * stack in the TCB, calls vTaskSwitchContext with interrupts masked,
 * then restores the context (and MPU programming, when enabled) of the
 * task now pointed to by pxCurrentTCB.
 */
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
#if ( configENABLE_FPU == 1 )
tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
it eq
vstmdbeq r0!, {s16-s31} /* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
mrs r1, psplim /* r1 = PSPLIM. */
mrs r2, control /* r2 = CONTROL. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
#else /* configENABLE_MPU */
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
#endif /* configENABLE_MPU */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
str r0, [r1] /* Save the new top of stack in TCB. */
cpsid i
bl vTaskSwitchContext
cpsie i
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
#if ( configENABLE_MPU == 1 )
dmb /* Complete outstanding transfers before disabling MPU. */
ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
ldr r4, [r2] /* Read the value of MPU_CTRL. */
bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */
str r4, [r2] /* Disable MPU. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r3, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
movs r3, #4 /* r3 = 4. */
str r3, [r2] /* Program RNR = 4. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */
ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
ldr r4, [r2] /* Read the value of MPU_CTRL. */
orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */
str r4, [r2] /* Enable MPU. */
dsb /* Force memory writes before continuing. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
#else /* configENABLE_MPU */
ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
#endif /* configENABLE_MPU */
#if ( configENABLE_FPU == 1 )
tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
it eq
vldmiaeq r0!, {s16-s31} /* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
msr control, r2 /* Restore the CONTROL register value for the task. */
#else /* configENABLE_MPU */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
#endif /* configENABLE_MPU */
msr psp, r0 /* Remember the new top of stack for the task. */
bx r3
/*-----------------------------------------------------------*/
/*
 * SVC_Handler - determines which stack (MSP or PSP) holds the exception
 * frame by testing bit 2 of EXC_RETURN, then tail-calls the C handler
 * vPortSVCHandler_C with the frame pointer in r0.
 */
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
/*-----------------------------------------------------------*/
END
|
Ai-Thinker-Open/Ai-Thinker-LoRaWAN-Ra-09
| 13,585
|
Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s
|
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
PUBLIC vRestoreContextOfFirstTask
PUBLIC vRaisePrivilege
PUBLIC vStartFirstTask
PUBLIC ulSetInterruptMaskFromISR
PUBLIC vClearInterruptMaskFromISR
PUBLIC PendSV_Handler
PUBLIC SVC_Handler
#if ( configENABLE_FPU == 1 )
#error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0.
#endif
/*-----------------------------------------------------------*/
/*---------------- Unprivileged Functions -------------------*/
/*-----------------------------------------------------------*/
SECTION .text:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
/*
 * BaseType_t xIsPrivileged( void );
 * Returns 1 in r0 when the processor runs privileged (CONTROL[0] == 0),
 * 0 otherwise.  Uses branch-based flow because the Cortex-M23 (ARMv8-M
 * baseline) Thumb subset used here has no IT blocks.  Clobbers: r0, r1.
 */
xIsPrivileged:
mrs r0, control /* r0 = CONTROL. */
movs r1, #1 /* r1 = 1. */
tst r0, r1 /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
beq running_privileged /* If the result of previous AND operation was 0, branch. */
movs r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
bx lr /* Return. */
running_privileged:
movs r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
/*
 * void vResetPrivilege( void );
 * Sets CONTROL[0] so that Thread mode runs unprivileged.
 * Clobbers: r0, r1.
 */
vResetPrivilege:
mrs r0, control /* r0 = CONTROL. */
movs r1, #1 /* r1 = 1. */
orrs r0, r1 /* r0 = r0 | r1. */
msr control, r0 /* CONTROL = r0. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
SECTION privileged_functions:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
/*
 * void vRestoreContextOfFirstTask( void );
 * Starts the scheduler: when configENABLE_MPU == 1, reprograms the MPU
 * from the current TCB (MAIR0, then regions 4-7 one RBAR/RLAR pair at a
 * time via RNR, since the baseline alias registers are written pairwise
 * here), then pops PSPLIM, CONTROL (MPU build only) and EXC_RETURN from
 * the first task's stack, sets PSP and branches to EXC_RETURN so
 * execution resumes inside the task.  Never returns to the caller.
 */
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
#if ( configENABLE_MPU == 1 )
dmb /* Complete outstanding transfers before disabling MPU. */
ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
ldr r3, [r2] /* Read the value of MPU_CTRL. */
movs r4, #1 /* r4 = 1. */
bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
str r3, [r2] /* Disable MPU. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r4, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
movs r4, #4 /* r4 = 4. */
str r4, [r2] /* Program RNR = 4. */
ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */
ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */
movs r4, #5 /* r4 = 5. */
str r4, [r2] /* Program RNR = 5. */
ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */
ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */
movs r4, #6 /* r4 = 6. */
str r4, [r2] /* Program RNR = 6. */
ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */
ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */
movs r4, #7 /* r4 = 7. */
str r4, [r2] /* Program RNR = 7. */
ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */
ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */
ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
ldr r3, [r2] /* Read the value of MPU_CTRL. */
movs r4, #1 /* r4 = 1. */
orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
str r3, [r2] /* Enable MPU. */
dsb /* Force memory writes before continuing. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
msr control, r2 /* Set this task's CONTROL value. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r3 /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r2 /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/*
 * void vRaisePrivilege( void );
 * Clears CONTROL[0] so Thread mode runs privileged.  Note that writes
 * to CONTROL are ignored from unprivileged code, so this only has an
 * effect when executed from privileged context.  Clobbers: r0, r1.
 */
vRaisePrivilege:
mrs r0, control /* Read the CONTROL register. */
movs r1, #1 /* r1 = 1. */
bics r0, r1 /* Clear the bit 0. */
msr control, r0 /* Write back the new CONTROL value. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
/*
 * void vStartFirstTask( void );
 * Resets MSP to the initial stack pointer (first word of the vector
 * table, located through VTOR), enables interrupts, then raises SVC #2
 * (portSVC_START_SCHEDULER) to start the first task.  Does not return.
 */
vStartFirstTask:
ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
ldr r0, [r0] /* The first entry in vector table is stack pointer. */
msr msp, r0 /* Set the MSP back to the start of the stack. */
cpsie i /* Globally enable interrupts. */
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
nop
/*-----------------------------------------------------------*/
/*
 * uint32_t ulSetInterruptMaskFromISR( void );
 * Returns the current PRIMASK value in r0, then disables interrupts.
 * Pair with vClearInterruptMaskFromISR to restore the previous mask.
 */
ulSetInterruptMaskFromISR:
mrs r0, PRIMASK
cpsid i
bx lr
/*-----------------------------------------------------------*/
/*
 * void vClearInterruptMaskFromISR( uint32_t ulMask );
 * Restores PRIMASK from r0 (the value previously returned by
 * ulSetInterruptMaskFromISR).
 */
vClearInterruptMaskFromISR:
msr PRIMASK, r0
bx lr
/*-----------------------------------------------------------*/
/*
 * PendSV_Handler - context switch (Cortex-M23, no TrustZone).
 * Saves the current task's non-automatically-stacked context on its
 * process stack (PSPLIM, CONTROL when configENABLE_MPU == 1, EXC_RETURN,
 * r4-r11 - high registers moved through low ones, as baseline STM only
 * takes low registers), stores the new top of stack in the TCB, calls
 * vTaskSwitchContext with interrupts masked, then restores the context
 * (and MPU programming, when enabled) of the task now pointed to by
 * pxCurrentTCB.
 */
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
#if ( configENABLE_MPU == 1 )
subs r0, r0, #44 /* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */
str r0, [r1] /* Save the new top of stack in TCB. */
mrs r1, psplim /* r1 = PSPLIM. */
mrs r2, control /* r2 = CONTROL. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r0!, {r1-r7} /* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */
mov r4, r8 /* r4 = r8. */
mov r5, r9 /* r5 = r9. */
mov r6, r10 /* r6 = r10. */
mov r7, r11 /* r7 = r11. */
stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */
#else /* configENABLE_MPU */
subs r0, r0, #40 /* Make space for PSPLIM, LR and the remaining registers on the stack. */
str r0, [r1] /* Save the new top of stack in TCB. */
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r0!, {r2-r7} /* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
mov r4, r8 /* r4 = r8. */
mov r5, r9 /* r5 = r9. */
mov r6, r10 /* r6 = r10. */
mov r7, r11 /* r7 = r11. */
stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */
#endif /* configENABLE_MPU */
cpsid i
bl vTaskSwitchContext
cpsie i
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
#if ( configENABLE_MPU == 1 )
dmb /* Complete outstanding transfers before disabling MPU. */
ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
ldr r3, [r2] /* Read the value of MPU_CTRL. */
movs r4, #1 /* r4 = 1. */
bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */
str r3, [r2] /* Disable MPU. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r4, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
movs r4, #4 /* r4 = 4. */
str r4, [r2] /* Program RNR = 4. */
ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */
ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */
movs r4, #5 /* r4 = 5. */
str r4, [r2] /* Program RNR = 5. */
ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */
ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */
movs r4, #6 /* r4 = 6. */
str r4, [r2] /* Program RNR = 6. */
ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */
ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */
movs r4, #7 /* r4 = 7. */
str r4, [r2] /* Program RNR = 7. */
ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */
ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */
stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */
ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */
ldr r3, [r2] /* Read the value of MPU_CTRL. */
movs r4, #1 /* r4 = 1. */
orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */
str r3, [r2] /* Enable MPU. */
dsb /* Force memory writes before continuing. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
adds r0, r0, #28 /* Move to the high registers. */
ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */
mov r8, r4 /* r8 = r4. */
mov r9, r5 /* r9 = r5. */
mov r10, r6 /* r10 = r6. */
mov r11, r7 /* r11 = r7. */
msr psp, r0 /* Remember the new top of stack for the task. */
subs r0, r0, #44 /* Move to the starting of the saved context. */
ldmia r0!, {r1-r7} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
msr control, r2 /* Restore the CONTROL register value for the task. */
bx r3
#else /* configENABLE_MPU */
adds r0, r0, #24 /* Move to the high registers. */
ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */
mov r8, r4 /* r8 = r4. */
mov r9, r5 /* r9 = r5. */
mov r10, r6 /* r10 = r6. */
mov r11, r7 /* r11 = r7. */
msr psp, r0 /* Remember the new top of stack for the task. */
subs r0, r0, #40 /* Move to the starting of the saved context. */
ldmia r0!, {r2-r7} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
bx r3
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
/*
 * SVC_Handler - determines which stack (MSP or PSP) holds the exception
 * frame by testing bit 2 of EXC_RETURN (branch-based; baseline Thumb has
 * no IT blocks), then tail-calls the C handler vPortSVCHandler_C with
 * the frame pointer in r0.
 */
SVC_Handler:
movs r0, #4
mov r1, lr
tst r0, r1
beq stacking_used_msp
mrs r0, psp
b vPortSVCHandler_C
stacking_used_msp:
mrs r0, msp
b vPortSVCHandler_C
/*-----------------------------------------------------------*/
END
|
aixcc-public/challenge-001-exemplar-source
| 1,243
|
usr/initramfs_data.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
initramfs_data includes the compressed binary that is the
filesystem used for early user space.
Note: Older versions of "as" (prior to binutils 2.11.90.0.23
released on 2001-07-14) did not support .incbin.
If you are forced to use older binutils than that then the
following trick can be applied to create the resulting binary:
ld -m elf_i386 --format binary --oformat elf32-i386 -r \
-T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
ld -m elf_i386 -r -o built-in.a initramfs_data.o
For including the .init.ramfs sections, see include/asm-generic/vmlinux.lds.
The above example is for i386 - the parameters vary from architectures.
Eventually look up LDFLAGS_BLOB in an older version of the
arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
Using .incbin has the advantage over ld that the correct flags are set
in the ELF header, as required by certain architectures.
*/
/* The embedded initramfs image: the raw data file is pulled in verbatim
   with .incbin and bracketed by start/end labels. */
.section .init.ramfs,"a"
__irf_start:
.incbin "usr/initramfs_inc_data"
__irf_end:
/* Exported size of the embedded image, emitted pointer-sized for the
   target (.quad on 64-bit, .long on 32-bit). */
.section .init.ramfs.info,"a"
.globl __initramfs_size
__initramfs_size:
#ifdef CONFIG_64BIT
.quad __irf_end - __irf_start
#else
.long __irf_end - __irf_start
#endif
|
aixcc-public/challenge-001-exemplar-source
| 1,342
|
scripts/module.lds.S
|
/*
* Common module linker script, always used when linking a module.
* Archs are free to supply their own linker scripts. ld will
* combine them automatically.
*/
SECTIONS {
	/* Sections explicitly discarded from every module. */
	/DISCARD/ : {
		*(.discard)
		*(.discard.*)
	}
	/* Exported-symbol and CRC tables, sorted so modpost can binary-search them. */
	__ksymtab		0 : { *(SORT(___ksymtab+*)) }
	__ksymtab_gpl		0 : { *(SORT(___ksymtab_gpl+*)) }
	__kcrctab		0 : { *(SORT(___kcrctab+*)) }
	__kcrctab_gpl		0 : { *(SORT(___kcrctab_gpl+*)) }
	.ctors			0 : ALIGN(8) { *(SORT(.ctors.*)) *(.ctors) }
	.init_array		0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) }
	.altinstructions	0 : ALIGN(8) { KEEP(*(.altinstructions)) }
	__bug_table		0 : ALIGN(8) { KEEP(*(__bug_table)) }
	__jump_table		0 : ALIGN(8) { KEEP(*(__jump_table)) }
	__patchable_function_entries : { *(__patchable_function_entries) }
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
	__kcfi_traps		: { KEEP(*(.kcfi_traps)) }
#endif
#ifdef CONFIG_LTO_CLANG
	/*
	 * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
	 * -ffunction-sections, which increases the size of the final module.
	 * Merge the split sections in the final binary.
	 */
	.bss : {
		*(.bss .bss.[0-9a-zA-Z_]*)
		*(.bss..L*)
	}
	.data : {
		*(.data .data.[0-9a-zA-Z_]*)
		*(.data..L*)
	}
	.rodata : {
		*(.rodata .rodata.[0-9a-zA-Z_]*)
		*(.rodata..L*)
	}
#endif
}
/* bring in arch-specific sections */
#include <asm/module.lds.h>
|
ajaxorg/ace
| 1,491
|
demo/kitchen-sink/docs/assembly_arm32.s
|
/* Syntax-highlighter demo program (see the instruction-matching list
   below): loads the address of msg via _asmtest, then invokes the
   write syscall (r7 = 4, fd = 1) and the exit syscall (r7 = 1).
   The mix of '//', block and ';' comment styles is deliberate. */
.section .text
.global _start
.global msg
_start:
bl _asmtest
mov r1, r0 // move return value into r1 for syscall write
/* syscall write(int fd, const void *buf, size_t count) */
mov r0, #1
//ldr r1, =msg // done by above call to _asmtest
ldr r2, =len
mov r7, #4
svc #0
/* syscall exit(int status) */
mov r0, #0
mov r7, #1
svc #0
_asmtest: // Start of the function
ldr r0, =msg ; loads pointer for msg into r0
bx lr // Returns from the function
msg:
.ascii "Hello, ARM32!\n"
len = . - msg
// All of these should match as instructions
addhs
adceqs
qaddne
qdaddcs
subcc
sbclo
rsbmi
rscpl
qsubvs
qdsubvc
mulhi
mlals
umullge
umlallt
umaalgt
smullle
smlalal
smulbb
smulwb
smlabt
smlawt
smlaltb
smuad
smladx
smlald
smusdx
smlsd
smlsldx
smmul
smmlar
smmls
mia
miaph
miatt
clz
sadd16
qsub16
shadd8
usub8
uqaddsubx
uhsubaddx
usad8
usada8
mov
movt
movw
mvn
mrs
msr
mra
mar
cpy
tst
teq
and
eor
orr
bic
cmp
cmn
ssat
ssat16
usat
usat16
pkhbt
pkhtb
sxth
sxtb16
sxtb
uxth
uxtb16
uxtb
sxtah
sxtab16
sxtab
uxtah
uxtab16
uxtab
rev
rev16
revsh
sel
b
bl
bx
blx
blx
bxj
cpsid
cpsie
cps
setend
srsia
rfeib
srsda
rfedb
srsfd
rfeed
srsfa
rfeea
bkpt
swi
svc
nop
ldr
ldrt
ldrb
ldrbt
ldrsb
ldrh
ldrsh
ldrd
ldmia
ldmfa
pld
ldrex
str
strt
strb
strbt
strh
strd
stmia
stmfd
strex
swp
swpb
cdp
cdp2
mrc
mrc2
mrrc
mrrc2
mcr
mcr2
mcrr
mcrr2
ldc
ldc2
stc
stc2
// End instruction matching test
|
ajaxorg/ace-builds
| 1,491
|
demo/kitchen-sink/docs/assembly_arm32.s
|
/* Syntax-highlighter demo program (see the instruction-matching list
   below): loads the address of msg via _asmtest, then invokes the
   write syscall (r7 = 4, fd = 1) and the exit syscall (r7 = 1).
   The mix of '//', block and ';' comment styles is deliberate. */
.section .text
.global _start
.global msg
_start:
bl _asmtest
mov r1, r0 // move return value into r1 for syscall write
/* syscall write(int fd, const void *buf, size_t count) */
mov r0, #1
//ldr r1, =msg // done by above call to _asmtest
ldr r2, =len
mov r7, #4
svc #0
/* syscall exit(int status) */
mov r0, #0
mov r7, #1
svc #0
_asmtest: // Start of the function
ldr r0, =msg ; loads pointer for msg into r0
bx lr // Returns from the function
msg:
.ascii "Hello, ARM32!\n"
len = . - msg
// All of these should match as instructions
addhs
adceqs
qaddne
qdaddcs
subcc
sbclo
rsbmi
rscpl
qsubvs
qdsubvc
mulhi
mlals
umullge
umlallt
umaalgt
smullle
smlalal
smulbb
smulwb
smlabt
smlawt
smlaltb
smuad
smladx
smlald
smusdx
smlsd
smlsldx
smmul
smmlar
smmls
mia
miaph
miatt
clz
sadd16
qsub16
shadd8
usub8
uqaddsubx
uhsubaddx
usad8
usada8
mov
movt
movw
mvn
mrs
msr
mra
mar
cpy
tst
teq
and
eor
orr
bic
cmp
cmn
ssat
ssat16
usat
usat16
pkhbt
pkhtb
sxth
sxtb16
sxtb
uxth
uxtb16
uxtb
sxtah
sxtab16
sxtab
uxtah
uxtab16
uxtab
rev
rev16
revsh
sel
b
bl
bx
blx
blx
bxj
cpsid
cpsie
cps
setend
srsia
rfeib
srsda
rfedb
srsfd
rfeed
srsfa
rfeea
bkpt
swi
svc
nop
ldr
ldrt
ldrb
ldrbt
ldrsb
ldrh
ldrsh
ldrd
ldmia
ldmfa
pld
ldrex
str
strt
strb
strbt
strh
strd
stmia
stmfd
strex
swp
swpb
cdp
cdp2
mrc
mrc2
mrrc
mrrc2
mcr
mcr2
mcrr
mcrr2
ldc
ldc2
stc
stc2
// End instruction matching test
|
aixcc-public/challenge-001-exemplar-source
| 10,906
|
drivers/memory/ti-emif-sram-pm.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Low level PM code for TI EMIF
*
* Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
* Dave Gerlach
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include "emif.h"
#include "ti-emif-asm-offsets.h"
#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES 0x00a0
#define EMIF_POWER_MGMT_SR_TIMER_MASK 0x00f0
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE 0x0200
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK 0x0700
#define EMIF_SDCFG_TYPE_DDR2 0x2 << SDRAM_TYPE_SHIFT
#define EMIF_SDCFG_TYPE_DDR3 0x3 << SDRAM_TYPE_SHIFT
#define EMIF_STATUS_READY 0x4
#define AM43XX_EMIF_PHY_CTRL_REG_COUNT 0x120
#define EMIF_AM437X_REGISTERS 0x1
.arm
.align 3
ENTRY(ti_emif_sram)
/*
* void ti_emif_save_context(void)
*
* Used during suspend to save the context of all required EMIF registers
* to local memory if the EMIF is going to lose context during the sleep
* transition. Operates on the VIRTUAL address of the EMIF.
*/
ENTRY(ti_emif_save_context)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
adr r4, ti_emif_pm_sram_data
@ r0 = EMIF register base (virtual), r2 = context save area (virtual)
ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
/* Save EMIF configuration */
ldr r1, [r0, #EMIF_SDRAM_CONFIG]
str r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
str r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_TIMING_1]
str r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_TIMING_2]
str r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_TIMING_3]
str r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
str r1, [r2, #EMIF_PMCR_VAL_OFFSET]
ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
str r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
str r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
ldr r1, [r0, #EMIF_DDR_PHY_CTRL_1]
str r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
ldr r1, [r0, #EMIF_COS_CONFIG]
str r1, [r2, #EMIF_COS_CONFIG_OFFSET]
ldr r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
str r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
ldr r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
str r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
ldr r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
str r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
ldr r1, [r0, #EMIF_OCP_CONFIG]
str r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
@ Extra registers exist only on the AM43xx register layout
ldr r5, [r4, #EMIF_PM_CONFIG_OFFSET]
cmp r5, #EMIF_SRAM_AM43_REG_LAYOUT
bne emif_skip_save_extra_regs
ldr r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
str r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
ldr r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
str r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
ldr r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
str r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
ldr r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
str r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
ldr r1, [r0, #EMIF_DLL_CALIB_CTRL]
str r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
ldr r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
str r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
/* Loop and save entire block of emif phy regs */
@ r5 = byte offset, advances by 4 until the whole PHY block is copied
mov r5, #0x0
add r4, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
add r3, r0, #EMIF_EXT_PHY_CTRL_1
ddr_phy_ctrl_save:
ldr r1, [r3, r5]
str r1, [r4, r5]
add r5, r5, #0x4
cmp r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
bne ddr_phy_ctrl_save
emif_skip_save_extra_regs:
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(ti_emif_save_context)
/*
* void ti_emif_restore_context(void)
*
* Used during resume to restore the context of all required EMIF registers
* from local memory after the EMIF has lost context during a sleep transition.
* Operates on the PHYSICAL address of the EMIF.
*/
ENTRY(ti_emif_restore_context)
adr r4, ti_emif_pm_sram_data
@ r0 = EMIF register base (physical), r2 = saved context area (physical);
@ caches/MMU may be off at this point, hence physical addresses.
ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]
/* Config EMIF Timings */
ldr r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
str r1, [r0, #EMIF_DDR_PHY_CTRL_1]
str r1, [r0, #EMIF_DDR_PHY_CTRL_1_SHDW]
ldr r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_TIMING_1]
str r1, [r0, #EMIF_SDRAM_TIMING_1_SHDW]
ldr r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_TIMING_2]
str r1, [r0, #EMIF_SDRAM_TIMING_2_SHDW]
ldr r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_TIMING_3]
str r1, [r0, #EMIF_SDRAM_TIMING_3_SHDW]
ldr r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
str r1, [r0, #EMIF_SDRAM_REFRESH_CTRL_SHDW]
ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
ldr r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
str r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
ldr r1, [r2, #EMIF_COS_CONFIG_OFFSET]
str r1, [r0, #EMIF_COS_CONFIG]
ldr r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
str r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
ldr r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
str r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
ldr r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
str r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
ldr r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
str r1, [r0, #EMIF_OCP_CONFIG]
@ Extra registers exist only on the AM43xx register layout
ldr r5, [r4, #EMIF_PM_CONFIG_OFFSET]
cmp r5, #EMIF_SRAM_AM43_REG_LAYOUT
bne emif_skip_restore_extra_regs
ldr r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
str r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
ldr r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
str r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
ldr r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
str r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
ldr r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
str r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
ldr r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
str r1, [r0, #EMIF_DLL_CALIB_CTRL]
ldr r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
str r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
ldr r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
/* Loop and restore entire block of emif phy regs */
mov r5, #0x0
/* Load ti_emif_regs_amx3 + EMIF_EXT_PHY_CTRL_VALS_OFFSET for address
 * to phy register save space
 */
add r3, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
add r4, r0, #EMIF_EXT_PHY_CTRL_1
ddr_phy_ctrl_restore:
ldr r1, [r3, r5]
str r1, [r4, r5]
add r5, r5, #0x4
cmp r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
bne ddr_phy_ctrl_restore
emif_skip_restore_extra_regs:
/*
 * Output impedance calib needed only for DDR3
 * but since the initial state of this will be
 * disabled for DDR2 no harm in restoring the
 * old configuration
 */
ldr r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
/* Write to sdcfg last for DDR2 only */
ldr r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
and r2, r1, #SDRAM_TYPE_MASK @ note: clobbers r2 (context base no longer needed)
cmp r2, #EMIF_SDCFG_TYPE_DDR2
streq r1, [r0, #EMIF_SDRAM_CONFIG]
mov pc, lr
ENDPROC(ti_emif_restore_context)
/*
* void ti_emif_run_hw_leveling(void)
*
* Used during resume to run hardware leveling again and restore the
* configuration of the EMIF PHY, only for DDR3.
*/
ENTRY(ti_emif_run_hw_leveling)
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
@ Prepare the "start full leveling" value, but only write it for DDR3
ldr r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
orr r3, r3, #RDWRLVLFULL_START
ldr r2, [r0, #EMIF_SDRAM_CONFIG]
and r2, r2, #SDRAM_TYPE_MASK
cmp r2, #EMIF_SDCFG_TYPE_DDR3
bne skip_hwlvl
str r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
/*
 * If EMIF registers are touched during initial stage of HW
 * leveling sequence there will be an L3 NOC timeout error issued
 * as the EMIF will not respond, which is not fatal, but it is
 * avoidable. This small wait loop is enough time for this condition
 * to clear, even at worst case of CPU running at max speed of 1Ghz.
 */
mov r2, #0x2000
1:
subs r2, r2, #0x1
bne 1b
/* Bit clears when operation is complete */
2: ldr r1, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
tst r1, #RDWRLVLFULL_START
bne 2b
skip_hwlvl:
mov pc, lr
ENDPROC(ti_emif_run_hw_leveling)
/*
* void ti_emif_enter_sr(void)
*
* Programs the EMIF to tell the SDRAM to enter into self-refresh
* mode during a sleep transition. Operates on the VIRTUAL address
* of the EMIF.
*/
ENTRY(ti_emif_enter_sr)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
@ Read-modify-write PM control: clear SR mode field, then set SR mode
ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
orr r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(ti_emif_enter_sr)
/*
* void ti_emif_exit_sr(void)
*
* Programs the EMIF to tell the SDRAM to exit self-refresh mode
* after a sleep transition. Operates on the PHYSICAL address of
* the EMIF.
*/
ENTRY(ti_emif_exit_sr)
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]
/*
 * Toggle EMIF to exit refresh mode:
 * if EMIF lost context, PWR_MGT_CTRL is currently 0, writing disable
 * (0x0), wont do diddly squat! so do a toggle from SR(0x2) to disable
 * (0x0) here.
 * *If* EMIF did not lose context, nothing broken as we write the same
 * value(0x2) to reg before we write a disable (0x0).
 */
ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
orr r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL] @ first write: SR enabled
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL] @ second write: SR disabled
/* Wait for EMIF to become ready */
1: ldr r1, [r0, #EMIF_STATUS]
tst r1, #EMIF_STATUS_READY
beq 1b
mov pc, lr
ENDPROC(ti_emif_exit_sr)
/*
* void ti_emif_abort_sr(void)
*
* Disables self-refresh after a failed transition to a low-power
* state so the kernel can jump back to DDR and follow abort path.
* Operates on the VIRTUAL address of the EMIF.
*/
ENTRY(ti_emif_abort_sr)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
@ Restore the saved PM control value with the self-refresh field cleared
ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
/* Wait for EMIF to become ready */
1: ldr r1, [r0, #EMIF_STATUS]
tst r1, #EMIF_STATUS_READY
beq 1b
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(ti_emif_abort_sr)
.align 3
/* Per-instance data area (base addresses, saved register context);
 * referenced PC-relative via adr from the routines above. */
ENTRY(ti_emif_pm_sram_data)
.space EMIF_PM_DATA_SIZE
/* Total size of this SRAM blob, measured from the first routine. */
ENTRY(ti_emif_sram_sz)
.word . - ti_emif_save_context
|
aixcc-public/challenge-001-exemplar-source
| 4,485
|
drivers/block/swim_asm.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* low-level functions for the SWIM floppy controller
*
* needs assembly language because is very timing dependent
* this controller exists only on macintosh 680x0 based
*
* Copyright (C) 2004,2008 Laurent Vivier <Laurent@lvivier.info>
*
* based on Alastair Bridgewater SWIM analysis, 2001
* based on netBSD IWM driver (c) 1997, 1998 Hauke Fath.
*
* 2004-08-21 (lv) - Initial implementation
* 2008-11-05 (lv) - add get_swim_mode
*/
.equ write_data, 0x0000
.equ write_mark, 0x0200
.equ write_CRC, 0x0400
.equ write_parameter,0x0600
.equ write_phase, 0x0800
.equ write_setup, 0x0a00
.equ write_mode0, 0x0c00
.equ write_mode1, 0x0e00
.equ read_data, 0x1000
.equ read_mark, 0x1200
.equ read_error, 0x1400
.equ read_parameter, 0x1600
.equ read_phase, 0x1800
.equ read_setup, 0x1a00
.equ read_status, 0x1c00
.equ read_handshake, 0x1e00
.equ o_side, 0
.equ o_track, 1
.equ o_sector, 2
.equ o_size, 3
.equ o_crc0, 4
.equ o_crc1, 5
.equ seek_time, 30000
.equ max_retry, 40
.equ sector_size, 512
.global swim_read_sector_header
/* C-callable wrapper: saves scratch registers, fetches the second
 * stack argument (header buffer pointer) into %a4 and runs the
 * address-mark reader. Result is returned in %d0 by the callee. */
swim_read_sector_header:
link %a6, #0
moveml %d1-%d5/%a0-%a4,%sp@-
movel %a6@(0x0c), %a4
bsr mfm_read_addrmark
moveml %sp@+, %d1-%d5/%a0-%a4
unlk %a6
rts
/* MFM sync patterns: three 0xa1 sync bytes followed by the mark type
 * (0xfe = address mark, 0xfb = data mark). */
sector_address_mark:
.byte 0xa1, 0xa1, 0xa1, 0xfe
sector_data_mark:
.byte 0xa1, 0xa1, 0xa1, 0xfb
/* Read a sector address mark into the header buffer at %a4.
 * %a6@(0x08) = controller base; %a2 -> handshake reg, %a3 -> mark reg.
 * Returns %d0 = 0 on success, -1 on timeout. */
mfm_read_addrmark:
movel %a6@(0x08), %a3
lea %a3@(read_handshake), %a2
lea %a3@(read_mark), %a3
moveq #-1, %d0
movew #seek_time, %d2
wait_header_init:
/* Reset the controller mode registers and clear any pending error */
tstb %a3@(read_error - read_mark)
moveb #0x18, %a3@(write_mode0 - read_mark)
moveb #0x01, %a3@(write_mode1 - read_mark)
moveb #0x01, %a3@(write_mode0 - read_mark)
tstb %a3@(read_error - read_mark)
moveb #0x08, %a3@(write_mode1 - read_mark)
lea sector_address_mark, %a0
moveq #3, %d1
wait_addr_mark_byte:
/* Poll handshake until a byte is ready, then match it against the
 * 4-byte address-mark pattern; restart on mismatch, bail on timeout */
tstb %a2@
dbmi %d2, wait_addr_mark_byte
bpl header_exit
moveb %a3@, %d3
cmpb %a0@+, %d3
dbne %d1, wait_addr_mark_byte
bne wait_header_init
/* Mark found: read track, side, sector, size and the two CRC bytes,
 * each with its own retry-bounded handshake poll */
moveq #max_retry, %d2
amark0: tstb %a2@
dbmi %d2, amark0
bpl signal_nonyb
moveb %a3@, %a4@(o_track)
moveq #max_retry, %d2
amark1: tstb %a2@
dbmi %d2, amark1
bpl signal_nonyb
moveb %a3@, %a4@(o_side)
moveq #max_retry, %d2
amark2: tstb %a2@
dbmi %d2, amark2
bpl signal_nonyb
moveb %a3@, %a4@(o_sector)
moveq #max_retry, %d2
amark3: tstb %a2@
dbmi %d2, amark3
bpl signal_nonyb
moveb %a3@, %a4@(o_size)
moveq #max_retry, %d2
crc0: tstb %a2@
dbmi %d2, crc0
bpl signal_nonyb
moveb %a3@, %a4@(o_crc0)
moveq #max_retry, %d2
crc1: tstb %a2@
dbmi %d2, crc1
bpl signal_nonyb
moveb %a3@, %a4@(o_crc1)
tstb %a3@(read_error - read_mark)
header_exit:
moveq #0, %d0
moveb #0x18, %a3@(write_mode0 - read_mark)
rts
signal_nonyb:
moveq #-1, %d0
moveb #0x18, %a3@(write_mode0 - read_mark)
rts
.global swim_read_sector_data
/* C-callable wrapper: saves scratch registers, fetches the second
 * stack argument (data buffer pointer) into %a4 and runs the sector
 * data reader. Result (bytes read or -1) is returned in %d0. */
swim_read_sector_data:
link %a6, #0
moveml %d1-%d5/%a0-%a5,%sp@-
movel %a6@(0x0c), %a4
bsr mfm_read_data
moveml %sp@+, %d1-%d5/%a0-%a5
unlk %a6
rts
/* Read one sector of data into the buffer at %a4.
 * %a6@(0x08) = controller base; %a2 -> handshake, %a5 -> data, %a3 -> mark.
 * Returns %d0 = number of bytes read, or -1 on timeout. */
mfm_read_data:
movel %a6@(0x08), %a3
lea %a3@(read_handshake), %a2
lea %a3@(read_data), %a5
lea %a3@(read_mark), %a3
movew #seek_time, %d2
wait_data_init:
/* Reset the controller mode registers and clear any pending error */
tstb %a3@(read_error - read_mark)
moveb #0x18, %a3@(write_mode0 - read_mark)
moveb #0x01, %a3@(write_mode1 - read_mark)
moveb #0x01, %a3@(write_mode0 - read_mark)
tstb %a3@(read_error - read_mark)
moveb #0x08, %a3@(write_mode1 - read_mark)
lea sector_data_mark, %a0
moveq #3, %d1
/* wait data address mark */
wait_data_mark_byte:
tstb %a2@
dbmi %d2, wait_data_mark_byte
bpl data_exit
moveb %a3@, %d3
cmpb %a0@+, %d3
dbne %d1, wait_data_mark_byte
bne wait_data_init
/* read data */
tstb %a3@(read_error - read_mark)
movel #sector_size-1, %d4 /* sector size */
read_new_data:
movew #max_retry, %d2
read_data_loop:
/* Poll handshake status; bits 0xc0 indicate data availability */
moveb %a2@, %d5
andb #0xc0, %d5
dbne %d2, read_data_loop
beq data_exit
moveb %a5@, %a4@+
andb #0x40, %d5
dbne %d4, read_new_data
beq exit_loop
/* Two bytes were available: copy the second one as well */
moveb %a5@, %a4@+
dbra %d4, read_new_data
exit_loop:
/* read CRC */
movew #max_retry, %d2
data_crc0:
tstb %a2@
dbmi %d2, data_crc0
bpl data_exit
moveb %a3@, %d5
moveq #max_retry, %d2
data_crc1:
tstb %a2@
dbmi %d2, data_crc1
bpl data_exit
moveb %a3@, %d5
tstb %a3@(read_error - read_mark)
moveb #0x18, %a3@(write_mode0 - read_mark)
/* return number of bytes read */
movel #sector_size, %d0
addw #1, %d4
subl %d4, %d0
rts
data_exit:
/* Timeout: put the controller back in idle mode and return -1 */
moveb #0x18, %a3@(write_mode0 - read_mark)
moveq #-1, %d0
rts
|
aixcc-public/challenge-001-exemplar-source
| 1,814
|
drivers/crypto/n2_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* n2_asm.S: Hypervisor calls for NCS support.
*
* Copyright (C) 2009 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/hypervisor.h>
#include "n2_core.h"
/* o0: queue type
* o1: RA of queue
* o2: num entries in queue
* o3: address of queue handle return
*/
ENTRY(sun4v_ncs_qconf)
/* sun4v fast trap: %o5 = function number, args in %o0-%o2,
 * status returned in %o0, queue handle in %o1 */
mov HV_FAST_NCS_QCONF, %o5
ta HV_FAST_TRAP
stx %o1, [%o3] /* store returned queue handle through caller's pointer */
retl
nop /* branch delay slot */
ENDPROC(sun4v_ncs_qconf)
/* %o0: queue handle
* %o1: address of queue type return
* %o2: address of queue base address return
* %o3: address of queue num entries return
*/
ENTRY(sun4v_ncs_qinfo)
/* Stash the three result pointers in globals, since the trap
 * overwrites %o1-%o3 with return values */
mov %o1, %g1
mov %o2, %g2
mov %o3, %g3
mov HV_FAST_NCS_QINFO, %o5
ta HV_FAST_TRAP
stx %o1, [%g1] /* queue type */
stx %o2, [%g2] /* queue base address */
stx %o3, [%g3] /* number of entries */
retl
nop /* branch delay slot */
ENDPROC(sun4v_ncs_qinfo)
/* %o0: queue handle
* %o1: address of head offset return
*/
ENTRY(sun4v_ncs_gethead)
mov %o1, %o2 /* preserve result pointer; trap returns head in %o1 */
mov HV_FAST_NCS_GETHEAD, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop /* branch delay slot */
ENDPROC(sun4v_ncs_gethead)
/* %o0: queue handle
* %o1: address of tail offset return
*/
ENTRY(sun4v_ncs_gettail)
mov %o1, %o2 /* preserve result pointer; trap returns tail in %o1 */
mov HV_FAST_NCS_GETTAIL, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop /* branch delay slot */
ENDPROC(sun4v_ncs_gettail)
/* %o0: queue handle
* %o1: new tail offset
*/
ENTRY(sun4v_ncs_settail)
/* No out-parameters: pass handle/offset straight through in %o0/%o1 */
mov HV_FAST_NCS_SETTAIL, %o5
ta HV_FAST_TRAP
retl
nop /* branch delay slot */
ENDPROC(sun4v_ncs_settail)
/* %o0: queue handle
* %o1: address of devino return
*/
ENTRY(sun4v_ncs_qhandle_to_devino)
mov %o1, %o2 /* preserve result pointer; trap returns devino in %o1 */
mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop /* branch delay slot */
ENDPROC(sun4v_ncs_qhandle_to_devino)
/* %o0: queue handle
* %o1: new head offset
*/
ENTRY(sun4v_ncs_sethead_marker)
/* No out-parameters: pass handle/offset straight through in %o0/%o1 */
mov HV_FAST_NCS_SETHEAD_MARKER, %o5
ta HV_FAST_TRAP
retl
nop /* branch delay slot */
ENDPROC(sun4v_ncs_sethead_marker)
|
aixcc-public/challenge-001-exemplar-source
| 2,042
|
drivers/watchdog/octeon-wdt-nmi.S
|
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007-2017 Cavium, Inc.
*/
#include <asm/asm.h>
#include <asm/regdef.h>
#define CVMSEG_BASE -32768
#define CVMSEG_SIZE 6912
#define SAVE_REG(r) sd $r, CVMSEG_BASE + CVMSEG_SIZE - ((32 - r) * 8)($0)
NESTED(octeon_wdt_nmi_stage2, 0, sp)
.set push
.set noreorder
.set noat
/* Clear Dcache so cvmseg works right. */
cache 1,0($0)
/* Use K0 to do a read/modify/write of CVMMEMCTL */
dmfc0 k0, $11, 7
/* Clear out the size of CVMSEG */
dins k0, $0, 0, 6
/* Set CVMSEG to its largest value */
ori k0, k0, 0x1c0 | 54
/* Store the CVMMEMCTL value */
dmtc0 k0, $11, 7
/*
 * Restore K0 from the debug scratch register, it was saved in
 * the boot-vector code.
 */
dmfc0 k0, $31
/*
 * Save all registers to the top CVMSEG. This shouldn't
 * corrupt any state used by the kernel. Also all registers
 * should have the value right before the NMI.
 */
SAVE_REG(0)
SAVE_REG(1)
SAVE_REG(2)
SAVE_REG(3)
SAVE_REG(4)
SAVE_REG(5)
SAVE_REG(6)
SAVE_REG(7)
SAVE_REG(8)
SAVE_REG(9)
SAVE_REG(10)
SAVE_REG(11)
SAVE_REG(12)
SAVE_REG(13)
SAVE_REG(14)
SAVE_REG(15)
SAVE_REG(16)
SAVE_REG(17)
SAVE_REG(18)
SAVE_REG(19)
SAVE_REG(20)
SAVE_REG(21)
SAVE_REG(22)
SAVE_REG(23)
SAVE_REG(24)
SAVE_REG(25)
SAVE_REG(26)
SAVE_REG(27)
SAVE_REG(28)
SAVE_REG(29)
SAVE_REG(30)
SAVE_REG(31)
/* Write zero to all CVMSEG locations per Core-15169 */
dli a0, CVMSEG_SIZE - (33 * 8)
1: sd zero, CVMSEG_BASE(a0)
daddiu a0, a0, -8
bgez a0, 1b
nop /* branch delay slot */
/* Set the stack to begin right below the registers */
dli sp, CVMSEG_BASE + CVMSEG_SIZE - (32 * 8)
/* Load the address of the third stage handler */
dla $25, octeon_wdt_nmi_stage3
/* Call the third stage handler */
jal $25
/* a0 is the address of the saved registers */
move a0, sp /* branch delay slot: argument set before the call lands */
/* Loop forever if we get here. */
2: b 2b
nop
.set pop
END(octeon_wdt_nmi_stage2)
|
aixcc-public/challenge-001-exemplar-source
| 1,538
|
drivers/char/hw_random/n2-asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* n2-asm.S: Niagara2 RNG hypervisor call assembler.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/hypervisor.h>
#include "n2rng.h"
.text
ENTRY(sun4v_rng_get_diag_ctl)
/* sun4v fast trap: %o5 = function number; status returned in %o0 */
mov HV_FAST_RNG_GET_DIAG_CTL, %o5
ta HV_FAST_TRAP
retl
nop /* branch delay slot */
ENDPROC(sun4v_rng_get_diag_ctl)
ENTRY(sun4v_rng_ctl_read_v1)
/* Preserve the two result pointers; trap returns values in %o1/%o2 */
mov %o1, %o3
mov %o2, %o4
mov HV_FAST_RNG_CTL_READ, %o5
ta HV_FAST_TRAP
stx %o1, [%o3]
retl
stx %o2, [%o4] /* executed in the branch delay slot */
ENDPROC(sun4v_rng_ctl_read_v1)
ENTRY(sun4v_rng_ctl_read_v2)
/* v2 returns four values, so open a register window to keep the
 * caller's pointer arguments (%i2-%i5) across the trap */
save %sp, -192, %sp
mov %i0, %o0
mov %i1, %o1
mov HV_FAST_RNG_CTL_READ, %o5
ta HV_FAST_TRAP
stx %o1, [%i2]
stx %o2, [%i3]
stx %o3, [%i4]
stx %o4, [%i5]
ret
restore %g0, %o0, %o0 /* delay slot: propagate status into caller's %o0 */
ENDPROC(sun4v_rng_ctl_read_v2)
ENTRY(sun4v_rng_ctl_write_v1)
mov %o3, %o4 /* preserve result pointer; trap returns value in %o1 */
mov HV_FAST_RNG_CTL_WRITE, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%o4] /* executed in the branch delay slot */
ENDPROC(sun4v_rng_ctl_write_v1)
ENTRY(sun4v_rng_ctl_write_v2)
/* No out-parameters in v2: arguments pass straight through */
mov HV_FAST_RNG_CTL_WRITE, %o5
ta HV_FAST_TRAP
retl
nop /* branch delay slot */
ENDPROC(sun4v_rng_ctl_write_v2)
ENTRY(sun4v_rng_data_read_diag_v1)
mov %o2, %o4 /* preserve result pointer; trap returns value in %o1 */
mov HV_FAST_RNG_DATA_READ_DIAG, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%o4] /* executed in the branch delay slot */
ENDPROC(sun4v_rng_data_read_diag_v1)
ENTRY(sun4v_rng_data_read_diag_v2)
mov %o3, %o4 /* preserve result pointer; trap returns value in %o1 */
mov HV_FAST_RNG_DATA_READ_DIAG, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%o4] /* executed in the branch delay slot */
ENDPROC(sun4v_rng_data_read_diag_v2)
ENTRY(sun4v_rng_data_read)
mov %o1, %o4 /* preserve result pointer; trap returns value in %o1 */
mov HV_FAST_RNG_DATA_READ, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%o4] /* executed in the branch delay slot */
ENDPROC(sun4v_rng_data_read)
|
aixcc-public/challenge-001-exemplar-source
| 2,152
|
drivers/soc/bcm/brcmstb/pm/s3-mips.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Broadcom Corporation
*/
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/bmips.h>
#include "pm.h"
.text
.set noreorder
.align 5
.global s3_reentry
/*
* a0: AON_CTRL base register
* a1: D-Cache line size
*/
LEAF(brcm_pm_do_s3)
/* Get the address of s3_context */
la t0, gp_regs
/* Save callee-saved GPRs plus gp/sp/fp into the context block */
sw ra, 0(t0)
sw s0, 4(t0)
sw s1, 8(t0)
sw s2, 12(t0)
sw s3, 16(t0)
sw s4, 20(t0)
sw s5, 24(t0)
sw s6, 28(t0)
sw s7, 32(t0)
sw gp, 36(t0)
sw sp, 40(t0)
sw fp, 44(t0)
/* Save CP0 Status */
mfc0 t1, CP0_STATUS
sw t1, 48(t0)
/* Write-back gp registers - cache will be gone */
/* Round t0 down to a cache-line boundary (a1 = D-cache line size) */
addiu t1, a1, -1
not t1
and t0, t1
/* Flush at least 64 bytes */
addiu t2, t0, 64
and t2, t1
1: cache 0x17, 0(t0)
bne t0, t2, 1b
addu t0, a1 /* branch delay slot: advance by one cache line */
/* Drop to deep standby */
/* Each store is read back to push the posted write out to the device */
li t1, PM_WARM_CONFIG
sw zero, AON_CTRL_PM_CTRL(a0)
lw zero, AON_CTRL_PM_CTRL(a0)
sw t1, AON_CTRL_PM_CTRL(a0)
lw t1, AON_CTRL_PM_CTRL(a0)
li t1, (PM_WARM_CONFIG | PM_PWR_DOWN)
sw t1, AON_CTRL_PM_CTRL(a0)
lw t1, AON_CTRL_PM_CTRL(a0)
/* Enable CP0 interrupt 2 and wait for interrupt */
mfc0 t0, CP0_STATUS
li t1, ~(ST0_IM | ST0_IE)
and t0, t1
ori t0, STATUSF_IP2
mtc0 t0, CP0_STATUS
nop
nop
nop
ori t0, ST0_IE
mtc0 t0, CP0_STATUS
/* Wait for interrupt */
wait
nop
s3_reentry:
/* Clear call/return stack */
li t0, (0x06 << 16)
mtc0 t0, $22, 2
ssnop
ssnop
ssnop
/* Clear jump target buffer */
li t0, (0x04 << 16)
mtc0 t0, $22, 2
ssnop
ssnop
ssnop
sync
nop
/* Setup mmu defaults */
mtc0 zero, CP0_WIRED
mtc0 zero, CP0_ENTRYHI
li k0, PM_DEFAULT_MASK
mtc0 k0, CP0_PAGEMASK
li sp, BMIPS_WARM_RESTART_VEC
la k0, plat_wired_tlb_setup
jalr k0
nop /* branch delay slot */
/* Restore general purpose registers */
la t0, gp_regs
lw fp, 44(t0)
lw sp, 40(t0)
lw gp, 36(t0)
lw s7, 32(t0)
lw s6, 28(t0)
lw s5, 24(t0)
lw s4, 20(t0)
lw s3, 16(t0)
lw s2, 12(t0)
lw s1, 8(t0)
lw s0, 4(t0)
lw ra, 0(t0)
/* Restore CP0 status */
lw t1, 48(t0)
mtc0 t1, CP0_STATUS
/* Return to caller */
li v0, 0
jr ra
nop /* branch delay slot */
END(brcm_pm_do_s3)
|
aixcc-public/challenge-001-exemplar-source
| 1,305
|
drivers/soc/bcm/brcmstb/pm/s2-arm.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright © 2014-2017 Broadcom
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "pm.h"
.text
.align 3
#define AON_CTRL_REG r10
#define DDR_PHY_STATUS_REG r11
/*
* r0: AON_CTRL base address
* r1: DDRY PHY PLL status register address
*/
ENTRY(brcmstb_pm_do_s2)
stmfd sp!, {r4-r11, lr}
mov AON_CTRL_REG, r0
mov DDR_PHY_STATUS_REG, r1
/* Flush memory transactions */
dsb
/* Cache DDR_PHY_STATUS_REG translation */
ldr r0, [DDR_PHY_STATUS_REG]
/* power down request */
/* Write 0 then the S2 command; each store is read back to ensure the
 * posted write reached the AON block before sleeping */
ldr r0, =PM_S2_COMMAND
ldr r1, =0
str r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
ldr r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
str r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
ldr r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
/* Wait for interrupt */
wfi
nop
/* Bring MEMC back up */
/* Poll bit 0 of the DDR PHY PLL status until it sets */
1: ldr r0, [DDR_PHY_STATUS_REG]
ands r0, #1
beq 1b
/* Power-up handshake */
ldr r0, =1
str r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS]
ldr r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS]
ldr r0, =0
str r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
ldr r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
/* Return to caller */
ldr r0, =0
ldmfd sp!, {r4-r11, pc}
ENDPROC(brcmstb_pm_do_s2)
/* Place literal pool here */
.ltorg
/* Size of the routine, so callers can copy it (e.g. into SRAM) */
ENTRY(brcmstb_pm_do_s2_sz)
.word . - brcmstb_pm_do_s2
|
aixcc-public/challenge-001-exemplar-source
| 2,947
|
drivers/soc/bcm/brcmstb/pm/s2-mips.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Broadcom Corporation
*/
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include "pm.h"
.text
.set noreorder
.align 5
/*
* a0: u32 params array
*/
LEAF(brcm_pm_do_s2)
/* Save ra and callee-saved s-registers on a local stack frame */
subu sp, 64
sw ra, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
sw s2, 12(sp)
sw s3, 16(sp)
sw s4, 20(sp)
sw s5, 24(sp)
sw s6, 28(sp)
sw s7, 32(sp)
/*
 * Dereference the params array
 * s0: AON_CTRL base register
 * s1: DDR_PHY base register
 * s2: TIMERS base register
 * s3: I-Cache line size
 * s4: Restart vector address
 * s5: Restart vector size
 */
move t0, a0
lw s0, 0(t0)
lw s1, 4(t0)
lw s2, 8(t0)
lw s3, 12(t0)
lw s4, 16(t0)
lw s5, 20(t0)
/* Lock this asm section into the I-cache */
/* t1 = cache-line alignment mask derived from line size in s3 */
addiu t1, s3, -1
not t1
la t0, brcm_pm_do_s2
and t0, t1
la t2, asm_end
and t2, t1
1: cache 0x1c, 0(t0)
bne t0, t2, 1b
addu t0, s3 /* branch delay slot: next cache line */
/* Lock the interrupt vector into the I-cache */
move t0, zero
2: move t1, s4
cache 0x1c, 0(t1)
addu t1, s3
addu t0, s3
ble t0, s5, 2b
nop /* branch delay slot */
sync
/* Power down request */
/* Each store is read back to push the posted write out to the device */
li t0, PM_S2_COMMAND
sw zero, AON_CTRL_PM_CTRL(s0)
lw zero, AON_CTRL_PM_CTRL(s0)
sw t0, AON_CTRL_PM_CTRL(s0)
lw t0, AON_CTRL_PM_CTRL(s0)
/* Enable CP0 interrupt 2 and wait for interrupt */
mfc0 t0, CP0_STATUS
/* Save cp0 sr for restoring later */
move s6, t0
li t1, ~(ST0_IM | ST0_IE)
and t0, t1
ori t0, STATUSF_IP2
mtc0 t0, CP0_STATUS
nop
nop
nop
ori t0, ST0_IE
mtc0 t0, CP0_STATUS
/* Wait for interrupt */
wait
nop
/* Wait for memc0 */
1: lw t0, DDR40_PHY_CONTROL_REGS_0_PLL_STATUS(s1)
andi t0, 1
beqz t0, 1b
nop /* branch delay slot */
/* 1ms delay needed for stable recovery */
/* Use TIMER1 to count 1 ms */
li t0, RESET_TIMER
sw t0, TIMER_TIMER1_CTRL(s2)
lw t0, TIMER_TIMER1_CTRL(s2)
li t0, START_TIMER
sw t0, TIMER_TIMER1_CTRL(s2)
lw t0, TIMER_TIMER1_CTRL(s2)
/* Prepare delay */
li t0, TIMER_MASK
lw t1, TIMER_TIMER1_STAT(s2)
and t1, t0
/* 1ms delay */
addi t1, 27000
/* Wait for the timer value to exceed t1 */
1: lw t0, TIMER_TIMER1_STAT(s2)
sgtu t2, t1, t0
bnez t2, 1b
nop /* branch delay slot */
/* Power back up */
li t1, 1
sw t1, AON_CTRL_HOST_MISC_CMDS(s0)
lw t1, AON_CTRL_HOST_MISC_CMDS(s0)
sw zero, AON_CTRL_PM_CTRL(s0)
lw zero, AON_CTRL_PM_CTRL(s0)
/* Unlock I-cache */
addiu t1, s3, -1
not t1
la t0, brcm_pm_do_s2
and t0, t1
la t2, asm_end
and t2, t1
1: cache 0x00, 0(t0)
bne t0, t2, 1b
addu t0, s3 /* branch delay slot */
/* Unlock interrupt vector */
move t0, zero
2: move t1, s4
cache 0x00, 0(t1)
addu t1, s3
addu t0, s3
ble t0, s5, 2b
nop /* branch delay slot */
/* Restore cp0 sr */
sync
nop
mtc0 s6, CP0_STATUS
nop
/* Set return value to success */
li v0, 0
/* Return to caller */
lw s7, 32(sp)
lw s6, 28(sp)
lw s5, 24(sp)
lw s4, 20(sp)
lw s3, 16(sp)
lw s2, 12(sp)
lw s1, 8(sp)
lw s0, 4(sp)
lw ra, 0(sp)
addiu sp, 64
jr ra
nop /* branch delay slot */
END(brcm_pm_do_s2)
/* End-of-section marker used as the I-cache lock/unlock upper bound */
.globl asm_end
asm_end:
nop
|
aixcc-public/challenge-001-exemplar-source
| 23,572
|
drivers/net/wan/wanxlfw.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
.psize 0
/*
wanXL serial card driver for Linux
card firmware part
Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
DPRAM BDs:
0x000 - 0x050 TX#0 0x050 - 0x140 RX#0
0x140 - 0x190 TX#1 0x190 - 0x280 RX#1
0x280 - 0x2D0 TX#2 0x2D0 - 0x3C0 RX#2
0x3C0 - 0x410 TX#3 0x410 - 0x500 RX#3
000 5FF 1536 Bytes Dual-Port RAM User Data / BDs
600 6FF 256 Bytes Dual-Port RAM User Data / BDs
700 7FF 256 Bytes Dual-Port RAM User Data / BDs
C00 CBF 192 Bytes Dual-Port RAM Parameter RAM Page 1
D00 DBF 192 Bytes Dual-Port RAM Parameter RAM Page 2
E00 EBF 192 Bytes Dual-Port RAM Parameter RAM Page 3
F00 FBF 192 Bytes Dual-Port RAM Parameter RAM Page 4
local interrupts level
NMI 7
PIT timer, CPM (RX/TX complete) 4
PCI9060 DMA and PCI doorbells 3
Cable - not used 1
*/
#include <linux/hdlc.h>
#include <linux/hdlc/ioctl.h>
#include "wanxl.h"
/* memory addresses and offsets */
MAX_RAM_SIZE = 16 * 1024 * 1024 // max RAM supported by hardware
PCI9060_VECTOR = 0x0000006C
CPM_IRQ_BASE = 0x40
ERROR_VECTOR = CPM_IRQ_BASE * 4
SCC1_VECTOR = (CPM_IRQ_BASE + 0x1E) * 4
SCC2_VECTOR = (CPM_IRQ_BASE + 0x1D) * 4
SCC3_VECTOR = (CPM_IRQ_BASE + 0x1C) * 4
SCC4_VECTOR = (CPM_IRQ_BASE + 0x1B) * 4
CPM_IRQ_LEVEL = 4
TIMER_IRQ = 128
TIMER_IRQ_LEVEL = 4
PITR_CONST = 0x100 + 16 // 1 Hz timer
MBAR = 0x0003FF00
VALUE_WINDOW = 0x40000000
ORDER_WINDOW = 0xC0000000
PLX = 0xFFF90000
CSRA = 0xFFFB0000
CSRB = 0xFFFB0002
CSRC = 0xFFFB0004
CSRD = 0xFFFB0006
STATUS_CABLE_LL = 0x2000
STATUS_CABLE_DTR = 0x1000
DPRBASE = 0xFFFC0000
SCC1_BASE = DPRBASE + 0xC00
MISC_BASE = DPRBASE + 0xCB0
SCC2_BASE = DPRBASE + 0xD00
SCC3_BASE = DPRBASE + 0xE00
SCC4_BASE = DPRBASE + 0xF00
// offset from SCCx_BASE
// SCC_xBASE contain offsets from DPRBASE and must be divisible by 8
SCC_RBASE = 0 // 16-bit RxBD base address
SCC_TBASE = 2 // 16-bit TxBD base address
SCC_RFCR = 4 // 8-bit Rx function code
SCC_TFCR = 5 // 8-bit Tx function code
SCC_MRBLR = 6 // 16-bit maximum Rx buffer length
SCC_C_MASK = 0x34 // 32-bit CRC constant
SCC_C_PRES = 0x38 // 32-bit CRC preset
SCC_MFLR = 0x46 // 16-bit max Rx frame length (without flags)
REGBASE = DPRBASE + 0x1000
PICR = REGBASE + 0x026 // 16-bit periodic irq control
PITR = REGBASE + 0x02A // 16-bit periodic irq timing
OR1 = REGBASE + 0x064 // 32-bit RAM bank #1 options
CICR = REGBASE + 0x540 // 32(24)-bit CP interrupt config
CIMR = REGBASE + 0x548 // 32-bit CP interrupt mask
CISR = REGBASE + 0x54C // 32-bit CP interrupts in-service
PADIR = REGBASE + 0x550 // 16-bit PortA data direction bitmap
PAPAR = REGBASE + 0x552 // 16-bit PortA pin assignment bitmap
PAODR = REGBASE + 0x554 // 16-bit PortA open drain bitmap
PADAT = REGBASE + 0x556 // 16-bit PortA data register
PCDIR = REGBASE + 0x560 // 16-bit PortC data direction bitmap
PCPAR = REGBASE + 0x562 // 16-bit PortC pin assignment bitmap
PCSO = REGBASE + 0x564 // 16-bit PortC special options
PCDAT = REGBASE + 0x566 // 16-bit PortC data register
PCINT = REGBASE + 0x568 // 16-bit PortC interrupt control
CR = REGBASE + 0x5C0 // 16-bit Command register
SCC1_REGS = REGBASE + 0x600
SCC2_REGS = REGBASE + 0x620
SCC3_REGS = REGBASE + 0x640
SCC4_REGS = REGBASE + 0x660
SICR = REGBASE + 0x6EC // 32-bit SI clock route
// offset from SCCx_REGS
SCC_GSMR_L = 0x00 // 32 bits
SCC_GSMR_H = 0x04 // 32 bits
SCC_PSMR = 0x08 // 16 bits
SCC_TODR = 0x0C // 16 bits
SCC_DSR = 0x0E // 16 bits
SCC_SCCE = 0x10 // 16 bits
SCC_SCCM = 0x14 // 16 bits
SCC_SCCS = 0x17 // 8 bits
#if QUICC_MEMCPY_USES_PLX
// PLX-DMA variants: program a PLX9060 DMA channel and wait for it in
// the memcpy_*_run helpers (defined elsewhere in this file).
.macro memcpy_from_pci src, dest, len // len must be < 8 MB
addl #3, \len
andl #0xFFFFFFFC, \len // always copy n * 4 bytes
movel \src, PLX_DMA_0_PCI
movel \dest, PLX_DMA_0_LOCAL
movel \len, PLX_DMA_0_LENGTH
movel #0x0103, PLX_DMA_CMD_STS // start channel 0 transfer
bsr memcpy_from_pci_run
.endm
.macro memcpy_to_pci src, dest, len
addl #3, \len
andl #0xFFFFFFFC, \len // always copy n * 4 bytes
movel \src, PLX_DMA_1_LOCAL
movel \dest, PLX_DMA_1_PCI
movel \len, PLX_DMA_1_LENGTH
movel #0x0301, PLX_DMA_CMD_STS // start channel 1 transfer
bsr memcpy_to_pci_run
.endm
#else
// CPU-copy variant: longword loop, then the 1-2 remaining odd bytes.
.macro memcpy src, dest, len // len must be < 65536 bytes
movel %d7, -(%sp) // src and dest must be < 256 MB
movel \len, %d7 // bits 0 and 1
lsrl #2, \len
andl \len, \len // set flags: any whole longwords to copy?
beq 99f // only 0 - 3 bytes
subl #1, \len // for dbf
98: movel (\src)+, (\dest)+
dbfw \len, 98b
99: movel %d7, \len
btstl #1, \len // copy a trailing word if bit 1 set
beq 99f
movew (\src)+, (\dest)+
99: btstl #0, \len // copy a trailing byte if bit 0 set
beq 99f
moveb (\src)+, (\dest)+
99:
movel (%sp)+, %d7
.endm
.macro memcpy_from_pci src, dest, len
addl #VALUE_WINDOW, \src // translate PCI address into the local window
memcpy \src, \dest, \len
.endm
.macro memcpy_to_pci src, dest, len
addl #VALUE_WINDOW, \dest // translate PCI address into the local window
memcpy \src, \dest, \len
.endm
#endif
// Spin until the CP command register's busy bit (bit 0) clears.
.macro wait_for_command
99: btstl #0, CR
bne 99b
.endm
/****************************** card initialization *******************/
.text
.global _start
_start: bra init
// Fixed-offset parameter area at _start+4, filled in by the host driver
// before the firmware runs.
.org _start + 4
ch_status_addr: .long 0, 0, 0, 0
rx_descs_addr: .long 0
init:
#if DETECT_RAM
movel OR1, %d0
andl #0xF00007FF, %d0 // mask AMxx bits
orl #0xFFFF800 & ~(MAX_RAM_SIZE - 1), %d0 // update RAM bank size
movel %d0, OR1
#endif
addl #VALUE_WINDOW, rx_descs_addr // PCI addresses of shared data
clrl %d0 // D0 = 4 * port
// Translate each non-zero per-port status address into the PCI window
init_1: tstl ch_status_addr(%d0)
beq init_2
addl #VALUE_WINDOW, ch_status_addr(%d0)
init_2: addl #4, %d0
cmpl #4 * 4, %d0
bne init_1
// Install exception/interrupt vectors
movel #pci9060_interrupt, PCI9060_VECTOR
movel #error_interrupt, ERROR_VECTOR
movel #port_interrupt_1, SCC1_VECTOR
movel #port_interrupt_2, SCC2_VECTOR
movel #port_interrupt_3, SCC3_VECTOR
movel #port_interrupt_4, SCC4_VECTOR
movel #timer_interrupt, TIMER_IRQ * 4
movel #0x78000000, CIMR // only SCCx IRQs from CPM
movew #(TIMER_IRQ_LEVEL << 8) + TIMER_IRQ, PICR // interrupt from PIT
movew #PITR_CONST, PITR
// SCC1=SCCa SCC2=SCCb SCC3=SCCc SCC4=SCCd prio=4 HP=-1 IRQ=64-79
movel #0xD41F40 + (CPM_IRQ_LEVEL << 13), CICR
movel #0x543, PLX_DMA_0_MODE // 32-bit, Ready, Burst, IRQ
movel #0x543, PLX_DMA_1_MODE
movel #0x0, PLX_DMA_0_DESC // from PCI to local
movel #0x8, PLX_DMA_1_DESC // from local to PCI
movel #0x101, PLX_DMA_CMD_STS // enable both DMA channels
// enable local IRQ, DMA, doorbells and PCI IRQ
orl #0x000F0300, PLX_INTERRUPT_CS
#if DETECT_RAM
bsr ram_test
#else
movel #1, PLX_MAILBOX_5 // non-zero value = init complete
#endif
bsr check_csr
movew #0xFFFF, PAPAR // all pins are clocks/data
clrw PADIR // first function
clrw PCSO // CD and CTS always active
/****************************** main loop *****************************/
// Main event loop.  channel_stats is a bit mask accumulated by the
// interrupt handlers (host doorbell requests + per-SCC events); it is
// sampled atomically-enough here (IRQs disabled except inside stop).
// D7 holds the sampled mask and is shifted right by one per port so the
// same port-0 bit tests work for every port.
main: movel channel_stats, %d7 // D7 = doorbell + irq status
clrl channel_stats
tstl %d7
bne main_1
// nothing to do - wait for next event
stop #0x2200 // supervisor + IRQ level 2
movew #0x2700, %sr // disable IRQs again
bra main
main_1: clrl %d0 // D0 = 4 * port
clrl %d6 // D6 = doorbell to host value
// Per-port dispatch: close takes precedence over open, then TX requests,
// then SCC events (TX completion / RX frames).
main_l: btstl #DOORBELL_TO_CARD_CLOSE_0, %d7
beq main_op
bclrl #DOORBELL_TO_CARD_OPEN_0, %d7 // in case both bits are set
bsr close_port
main_op:
btstl #DOORBELL_TO_CARD_OPEN_0, %d7
beq main_cl
bsr open_port
main_cl:
btstl #DOORBELL_TO_CARD_TX_0, %d7
beq main_txend
bsr tx
main_txend:
btstl #TASK_SCC_0, %d7
beq main_next
bsr tx_end
bsr rx
main_next:
lsrl #1, %d7 // port status for next port
addl #4, %d0 // D0 = 4 * next port
cmpl #4 * 4, %d0
bne main_l
movel %d6, PLX_DOORBELL_FROM_CARD // signal the host
bra main
/****************************** open port *****************************/
// Open one WAN port: initialize ring indices, clocking, CSR outputs,
// TX/RX buffer descriptors, SCC parameter RAM and CRC/encoding modes,
// then enable the SCC.  Idempotent: returns at once if already open.
open_port: // D0 = 4 * port, D6 = doorbell to host
movel ch_status_addr(%d0), %a0 // A0 = port status address
tstl STATUS_OPEN(%a0)
bne open_port_ret // port already open
movel #1, STATUS_OPEN(%a0) // confirm the port is open
// setup BDs
clrl tx_in(%d0)
clrl tx_out(%d0)
clrl tx_count(%d0)
clrl rx_in(%d0)
// Clock source: either TX clock recovered from RX, or external.
movel SICR, %d1 // D1 = clock settings in SICR
andl clocking_mask(%d0), %d1
cmpl #CLOCK_TXFROMRX, STATUS_CLOCKING(%a0)
bne open_port_clock_ext
orl clocking_txfromrx(%d0), %d1
bra open_port_set_clock
open_port_clock_ext:
orl clocking_ext(%d0), %d1
open_port_set_clock:
movel %d1, SICR // update clock settings in SICR
orw #STATUS_CABLE_DTR, csr_output(%d0) // DTR on
bsr check_csr // call with disabled timer interrupt
// Setup TX descriptors
movel first_buffer(%d0), %d1 // D1 = starting buffer address
movel tx_first_bd(%d0), %a1 // A1 = starting TX BD address
movel #TX_BUFFERS - 2, %d2 // D2 = TX_BUFFERS - 1 counter (dbf counts to -1)
movel #0x18000000, %d3 // D3 = initial TX BD flags: Int + Last
cmpl #PARITY_NONE, STATUS_PARITY(%a0)
beq open_port_tx_loop
bsetl #26, %d3 // TX BD flag: Transmit CRC
open_port_tx_loop:
movel %d3, (%a1)+ // TX flags + length
movel %d1, (%a1)+ // buffer address
addl #BUFFER_LENGTH, %d1
dbfw %d2, open_port_tx_loop
bsetl #29, %d3 // TX BD flag: Wrap (last BD)
movel %d3, (%a1)+ // Final TX flags + length
movel %d1, (%a1)+ // buffer address
// Setup RX descriptors // A1 = starting RX BD address
movel #RX_BUFFERS - 2, %d2 // D2 = RX_BUFFERS - 1 counter
open_port_rx_loop:
movel #0x90000000, (%a1)+ // RX flags + length (Empty + Int)
movel %d1, (%a1)+ // buffer address
addl #BUFFER_LENGTH, %d1
dbfw %d2, open_port_rx_loop
movel #0xB0000000, (%a1)+ // Final RX flags + length (adds Wrap)
movel %d1, (%a1)+ // buffer address
// Setup port parameters
movel scc_base_addr(%d0), %a1 // A1 = SCC_BASE address
movel scc_reg_addr(%d0), %a2 // A2 = SCC_REGS address
movel #0xFFFF, SCC_SCCE(%a2) // clear status bits
movel #0x0000, SCC_SCCM(%a2) // interrupt mask
movel tx_first_bd(%d0), %d1
movew %d1, SCC_TBASE(%a1) // D1 = offset of first TxBD
addl #TX_BUFFERS * 8, %d1 // RX BDs follow TX BDs (8 bytes each)
movew %d1, SCC_RBASE(%a1) // D1 = offset of first RxBD
moveb #0x8, SCC_RFCR(%a1) // Intel mode, 1000
moveb #0x8, SCC_TFCR(%a1)
// Parity settings: select CRC polynomial/preset and adjust max frame
// length (MFLR) by the CRC size; parity_bytes is later subtracted from
// received frame lengths in rx.
cmpl #PARITY_CRC16_PR1_CCITT, STATUS_PARITY(%a0)
bne open_port_parity_1
clrw SCC_PSMR(%a2) // CRC16-CCITT
movel #0xF0B8, SCC_C_MASK(%a1)
movel #0xFFFF, SCC_C_PRES(%a1)
movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC
movew #2, parity_bytes(%d0)
bra open_port_2
open_port_parity_1:
cmpl #PARITY_CRC32_PR1_CCITT, STATUS_PARITY(%a0)
bne open_port_parity_2
movew #0x0800, SCC_PSMR(%a2) // CRC32-CCITT
movel #0xDEBB20E3, SCC_C_MASK(%a1)
movel #0xFFFFFFFF, SCC_C_PRES(%a1)
movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC
movew #4, parity_bytes(%d0)
bra open_port_2
open_port_parity_2:
cmpl #PARITY_CRC16_PR0_CCITT, STATUS_PARITY(%a0)
bne open_port_parity_3
clrw SCC_PSMR(%a2) // CRC16-CCITT preset 0
movel #0xF0B8, SCC_C_MASK(%a1)
clrl SCC_C_PRES(%a1)
movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC
movew #2, parity_bytes(%d0)
bra open_port_2
open_port_parity_3:
cmpl #PARITY_CRC32_PR0_CCITT, STATUS_PARITY(%a0)
bne open_port_parity_4
movew #0x0800, SCC_PSMR(%a2) // CRC32-CCITT preset 0
movel #0xDEBB20E3, SCC_C_MASK(%a1)
clrl SCC_C_PRES(%a1)
movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC
movew #4, parity_bytes(%d0)
bra open_port_2
open_port_parity_4:
clrw SCC_PSMR(%a2) // no parity
movel #0xF0B8, SCC_C_MASK(%a1)
movel #0xFFFF, SCC_C_PRES(%a1)
movew #HDLC_MAX_MRU, SCC_MFLR(%a1) // 0 bytes for CRC
clrw parity_bytes(%d0)
open_port_2:
movel #0x00000003, SCC_GSMR_H(%a2) // RTSM
cmpl #ENCODING_NRZI, STATUS_ENCODING(%a0)
bne open_port_nrz
movel #0x10040900, SCC_GSMR_L(%a2) // NRZI: TCI Tend RECN+TENC=1
bra open_port_3
open_port_nrz:
movel #0x10040000, SCC_GSMR_L(%a2) // NRZ: TCI Tend RECN+TENC=0
open_port_3:
movew #BUFFER_LENGTH, SCC_MRBLR(%a1)
movel %d0, %d1
lsll #4, %d1 // D1 bits 7 and 6 = port
orl #1, %d1
movew %d1, CR // Init SCC RX and TX params
wait_for_command
// TCI Tend ENR ENT
movew #0x001F, SCC_SCCM(%a2) // TXE RXF BSY TXB RXB interrupts
orl #0x00000030, SCC_GSMR_L(%a2) // enable SCC
open_port_ret:
rts
/****************************** close port ****************************/
// Close one WAN port: mask SCC interrupts, disable the transmitter and
// receiver (ENT/ENR), drop DTR, and report the port closed to the host.
close_port: // D0 = 4 * port, D6 = doorbell to host
movel scc_reg_addr(%d0), %a0 // A0 = SCC_REGS address
clrw SCC_SCCM(%a0) // no SCC interrupts
andl #0xFFFFFFCF, SCC_GSMR_L(%a0) // Disable ENT and ENR
andw #~STATUS_CABLE_DTR, csr_output(%d0) // DTR off
bsr check_csr // call with disabled timer interrupt
movel ch_status_addr(%d0), %d1
clrl STATUS_OPEN(%d1) // confirm the port is closed
rts
/****************************** transmit packet ***********************/
// queue packets for transmission
// Drains the host's TX descriptor ring into SCC buffer descriptors:
// for each host descriptor marked PACKET_FULL, copy its payload from PCI
// memory into the local TX buffer and hand the BD to the CP (bit 31).
// Stops when all BDs are busy or the next host descriptor is not full.
tx: // D0 = 4 * port, D6 = doorbell to host
cmpl #TX_BUFFERS, tx_count(%d0)
beq tx_ret // all DB's = descs in use
movel tx_out(%d0), %d1
movel %d1, %d2 // D1 = D2 = tx_out BD# = desc#
mulul #DESC_LENGTH, %d2 // D2 = TX desc offset
addl ch_status_addr(%d0), %d2
addl #STATUS_TX_DESCS, %d2 // D2 = TX desc address
cmpl #PACKET_FULL, (%d2) // desc status
bne tx_ret
// queue it
movel 4(%d2), %a0 // PCI address
lsll #3, %d1 // BD is 8-bytes long
addl tx_first_bd(%d0), %d1 // D1 = current tx_out BD addr
movel 4(%d1), %a1 // A1 = dest address
movel 8(%d2), %d2 // D2 = length
movew %d2, 2(%d1) // length into BD
memcpy_from_pci %a0, %a1, %d2
bsetl #31, (%d1) // CP go ahead
// update tx_out and tx_count (wrap tx_out at TX_BUFFERS)
movel tx_out(%d0), %d1
addl #1, %d1
cmpl #TX_BUFFERS, %d1
bne tx_1
clrl %d1
tx_1: movel %d1, tx_out(%d0)
addl #1, tx_count(%d0)
bra tx // try to queue the next packet
tx_ret: rts
/****************************** packet received ***********************/
// Service receive buffers // D0 = 4 * port, D6 = doorbell to host
// Walks filled RX buffer descriptors: validates status bits, strips the
// CRC length (parity_bytes), copies good frames into the host's RX
// descriptor ring via PCI, recycles the BD and advances rx_in/rx_out.
// Bad frames and overruns only bump the per-port error counters.
rx: movel rx_in(%d0), %d1 // D1 = rx_in BD#
lsll #3, %d1 // BD is 8-bytes long
addl rx_first_bd(%d0), %d1 // D1 = current rx_in BD address
movew (%d1), %d2 // D2 = RX BD flags
btstl #15, %d2
bne rx_ret // BD still empty
btstl #1, %d2
bne rx_overrun
tstw parity_bytes(%d0)
bne rx_parity
bclrl #2, %d2 // do not test for CRC errors
rx_parity:
andw #0x0CBC, %d2 // mask status bits
cmpw #0x0C00, %d2 // correct frame (First+Last set, no errors)
bne rx_bad_frame
clrl %d3
movew 2(%d1), %d3
subw parity_bytes(%d0), %d3 // D3 = packet length
cmpw #HDLC_MAX_MRU, %d3
bgt rx_bad_frame
rx_good_frame:
movel rx_out, %d2
mulul #DESC_LENGTH, %d2
addl rx_descs_addr, %d2 // D2 = RX desc address
cmpl #PACKET_EMPTY, (%d2) // desc stat
bne rx_overrun // host ring full = overrun
movel %d3, 8(%d2) // store packet length
movel 4(%d1), %a0 // A0 = source address
movel 4(%d2), %a1
tstl %a1 // NULL dest = host wants the data dropped
beq rx_ignore_data
memcpy_to_pci %a0, %a1, %d3
rx_ignore_data:
movel packet_full(%d0), (%d2) // update desc stat
// update D6 and rx_out
bsetl #DOORBELL_FROM_CARD_RX, %d6 // signal host that RX completed
movel rx_out, %d2
addl #1, %d2
cmpl #RX_QUEUE_LENGTH, %d2
bne rx_1
clrl %d2
rx_1: movel %d2, rx_out
rx_free_bd:
andw #0xF000, (%d1) // clear CM and error bits
bsetl #31, (%d1) // free BD
// update rx_in (wrap at RX_BUFFERS)
movel rx_in(%d0), %d1
addl #1, %d1
cmpl #RX_BUFFERS, %d1
bne rx_2
clrl %d1
rx_2: movel %d1, rx_in(%d0)
bra rx // process the next BD
rx_overrun:
movel ch_status_addr(%d0), %d2
addl #1, STATUS_RX_OVERRUNS(%d2)
bra rx_free_bd
rx_bad_frame:
movel ch_status_addr(%d0), %d2
addl #1, STATUS_RX_FRAME_ERRORS(%d2)
bra rx_free_bd
rx_ret: rts
/****************************** packet transmitted ********************/
// Service transmit buffers // D0 = 4 * port, D6 = doorbell to host
// Reaps completed TX buffer descriptors: for each BD released by the CP
// (bit 15 clear), mark the matching host descriptor PACKET_SENT (or
// PACKET_UNDERRUN on error) and signal the host via the TX doorbell.
tx_end: tstl tx_count(%d0)
beq tx_end_ret // TX buffers already empty
movel tx_in(%d0), %d1
movel %d1, %d2 // D1 = D2 = tx_in BD# = desc#
lsll #3, %d1 // BD is 8-bytes long
addl tx_first_bd(%d0), %d1 // D1 = current tx_in BD address
movew (%d1), %d3 // D3 = TX BD flags
btstl #15, %d3
bne tx_end_ret // BD still being transmitted
// update D6, tx_in and tx_count
orl bell_tx(%d0), %d6 // signal host that TX desc freed
subl #1, tx_count(%d0)
movel tx_in(%d0), %d1
addl #1, %d1
cmpl #TX_BUFFERS, %d1
bne tx_end_1
clrl %d1 // wrap at TX_BUFFERS
tx_end_1:
movel %d1, tx_in(%d0)
// free host's descriptor
mulul #DESC_LENGTH, %d2 // D2 = TX desc offset
addl ch_status_addr(%d0), %d2
addl #STATUS_TX_DESCS, %d2 // D2 = TX desc address
btstl #1, %d3 // underrun bit set by the CP?
bne tx_end_underrun
movel #PACKET_SENT, (%d2)
bra tx_end // reap the next BD
tx_end_underrun:
movel #PACKET_UNDERRUN, (%d2)
bra tx_end
tx_end_ret: rts
/****************************** PLX PCI9060 DMA memcpy ****************/
#if QUICC_MEMCPY_USES_PLX
// called with interrupts disabled
// Wait for PLX DMA channel 0 (PCI -> local) to finish.  Sleeps with
// stop so the PCI9060 interrupt wakes us; the done bit is polled from a
// local copy because the PLX register must not be btst'ed directly.
memcpy_from_pci_run:
movel %d0, -(%sp)
movew %sr, -(%sp)
memcpy_1:
movel PLX_DMA_CMD_STS, %d0 // do not btst PLX register directly
btstl #4, %d0 // transfer done?
bne memcpy_end
stop #0x2200 // enable PCI9060 interrupts
movew #0x2700, %sr // disable interrupts again
bra memcpy_1
// Same as above for PLX DMA channel 1 (local -> PCI); done bit is #12.
memcpy_to_pci_run:
movel %d0, -(%sp)
movew %sr, -(%sp)
memcpy_2:
movel PLX_DMA_CMD_STS, %d0 // do not btst PLX register directly
btstl #12, %d0 // transfer done?
bne memcpy_end
stop #0x2200 // enable PCI9060 interrupts
movew #0x2700, %sr // disable interrupts again
bra memcpy_2
// Common exit: restore saved SR and D0.
memcpy_end:
movew (%sp)+, %sr
movel (%sp)+, %d0
rts
#endif
/****************************** PLX PCI9060 interrupt *****************/
// PCI9060 interrupt: collect host doorbell requests into channel_stats
// (consumed by the main loop), acknowledge them, and clear both DMA
// channel interrupts.  Clobbers nothing (D0 saved/restored).
pci9060_interrupt:
movel %d0, -(%sp)
movel PLX_DOORBELL_TO_CARD, %d0
movel %d0, PLX_DOORBELL_TO_CARD // confirm all requests
orl %d0, channel_stats
movel #0x0909, PLX_DMA_CMD_STS // clear DMA ch #0 and #1 interrupts
movel (%sp)+, %d0
rte
/****************************** SCC interrupts ************************/
// Per-SCC interrupt handlers: acknowledge all pending SCC events
// (writing SCCE back to itself via orl #0), record the port in
// channel_stats for the main loop, and clear the CISR in-service bit.
port_interrupt_1:
orl #0, SCC1_REGS + SCC_SCCE; // confirm SCC events
orl #1 << TASK_SCC_0, channel_stats
movel #0x40000000, CISR
rte
port_interrupt_2:
orl #0, SCC2_REGS + SCC_SCCE; // confirm SCC events
orl #1 << TASK_SCC_1, channel_stats
movel #0x20000000, CISR
rte
port_interrupt_3:
orl #0, SCC3_REGS + SCC_SCCE; // confirm SCC events
orl #1 << TASK_SCC_2, channel_stats
movel #0x10000000, CISR
rte
port_interrupt_4:
orl #0, SCC4_REGS + SCC_SCCE; // confirm SCC events
orl #1 << TASK_SCC_3, channel_stats
movel #0x08000000, CISR
rte
// Spurious/error interrupt: ignored.
error_interrupt:
rte
/****************************** cable and PM routine ******************/
// modified registers: none
// Scan all four ports' CSR registers: decode the cable type / PM sense
// bits, program the matching output mode (or disable an unrecognized
// cable), merge in the requested LL/DTR outputs, read back DCD/CTS via
// PCDAT, and on any change update the host-visible STATUS_CABLE word
// and ring the per-port cable doorbell.  Called from the timer
// interrupt and (with the timer IRQ disabled) from open/close_port.
check_csr:
movel %d0, -(%sp)
movel %d1, -(%sp)
movel %d2, -(%sp)
movel %a0, -(%sp)
movel %a1, -(%sp)
clrl %d0 // D0 = 4 * port
movel #CSRA, %a0 // A0 = CSR address
check_csr_loop:
movew (%a0), %d1 // D1 = CSR input bits
andl #0xE7, %d1 // PM and cable sense bits (no DCE bit)
cmpw #STATUS_CABLE_V35 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
bne check_csr_1
movew #0x0E08, %d1 // V.35 output mode
bra check_csr_valid
check_csr_1:
cmpw #STATUS_CABLE_X21 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
bne check_csr_2
movew #0x0408, %d1 // X.21 output mode
bra check_csr_valid
check_csr_2:
cmpw #STATUS_CABLE_V24 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
bne check_csr_3
movew #0x0208, %d1 // V.24 output mode
bra check_csr_valid
check_csr_3:
cmpw #STATUS_CABLE_EIA530 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
bne check_csr_disable
movew #0x0D08, %d1 // EIA530 output mode
bra check_csr_valid
check_csr_disable:
movew #0x0008, %d1 // D1 = disable everything
movew #0x80E7, %d2 // D2 = input mask: ignore DSR
bra check_csr_write
check_csr_valid: // D1 = mode and IRQ bits
movew csr_output(%d0), %d2
andw #0x3000, %d2 // D2 = requested LL and DTR bits
orw %d2, %d1 // D1 = all requested output bits
movew #0x80FF, %d2 // D2 = input mask: include DSR
check_csr_write:
cmpw old_csr_output(%d0), %d1
beq check_csr_input // skip write if output unchanged
movew %d1, old_csr_output(%d0)
movew %d1, (%a0) // Write CSR output bits
check_csr_input:
movew (PCDAT), %d1
andw dcd_mask(%d0), %d1
beq check_csr_dcd_on // DCD and CTS signals are negated
movew (%a0), %d1 // D1 = CSR input bits
andw #~STATUS_CABLE_DCD, %d1 // DCD off
bra check_csr_previous
check_csr_dcd_on:
movew (%a0), %d1 // D1 = CSR input bits
orw #STATUS_CABLE_DCD, %d1 // DCD on
check_csr_previous:
andw %d2, %d1 // input mask
movel ch_status_addr(%d0), %a1
cmpl STATUS_CABLE(%a1), %d1 // check for change
beq check_csr_next
movel %d1, STATUS_CABLE(%a1) // update status
movel bell_cable(%d0), PLX_DOORBELL_FROM_CARD // signal the host
check_csr_next:
addl #2, %a0 // next CSR register
addl #4, %d0 // D0 = 4 * next port
cmpl #4 * 4, %d0
bne check_csr_loop
movel (%sp)+, %a1
movel (%sp)+, %a0
movel (%sp)+, %d2
movel (%sp)+, %d1
movel (%sp)+, %d0
rts
/****************************** timer interrupt ***********************/
// Periodic PIT interrupt: poll cable/PM status on all ports.
timer_interrupt:
bsr check_csr
rte
/****************************** RAM sizing and test *******************/
#if DETECT_RAM
// Detect the installed RAM size by looking for address aliasing (the
// test value written at 128KB-4 reappearing at larger offsets), then
// fill and verify all memory above the firmware.  Reports the usable
// RAM size (or the first bad address) to the host via PLX_MAILBOX_5;
// a non-zero mailbox value doubles as the init-complete signal.
ram_test:
movel #0x12345678, %d1 // D1 = test value
movel %d1, (128 * 1024 - 4)
movel #128 * 1024, %d0 // D0 = RAM size tested
ram_test_size:
cmpl #MAX_RAM_SIZE, %d0
beq ram_test_size_found
movel %d0, %a0
addl #128 * 1024 - 4, %a0
cmpl (%a0), %d1 // test value aliased here?
beq ram_test_size_check
ram_test_next_size:
lsll #1, %d0 // try the next power of two
bra ram_test_size
ram_test_size_check:
// Could be a coincidence: flip the pattern and re-check the alias.
eorl #0xFFFFFFFF, %d1
movel %d1, (128 * 1024 - 4)
cmpl (%a0), %d1
bne ram_test_next_size
ram_test_size_found: // D0 = RAM size
movel %d0, %a0 // A0 = fill ptr
subl #firmware_end + 4, %d0
lsrl #2, %d0 // D0 = number of longwords to test
movel %d0, %d1 // D1 = DBf counter
// Fill downward with each cell's own address (dbf is 16-bit, hence
// the outer 0x10000 decrement loop).
ram_test_fill:
movel %a0, -(%a0)
dbfw %d1, ram_test_fill
subl #0x10000, %d1
cmpl #0xFFFFFFFF, %d1
bne ram_test_fill
ram_test_loop: // D0 = DBf counter
cmpl (%a0)+, %a0 // each cell must equal its own address
dbnew %d0, ram_test_loop
bne ram_test_found_bad
subl #0x10000, %d0
cmpl #0xFFFFFFFF, %d0
bne ram_test_loop
bra ram_test_all_ok
ram_test_found_bad:
subl #4, %a0 // A0 = address of the failing cell
ram_test_all_ok:
movel %a0, PLX_MAILBOX_5 // report size / bad address to the host
rts
#endif
/****************************** constants *****************************/
// Per-port lookup tables; all are indexed by D0 = 4 * port.
scc_reg_addr:
.long SCC1_REGS, SCC2_REGS, SCC3_REGS, SCC4_REGS
scc_base_addr:
.long SCC1_BASE, SCC2_BASE, SCC3_BASE, SCC4_BASE
// BD rings live in dual-port RAM: TX_BUFFERS TX BDs then RX_BUFFERS
// RX BDs per port, 8 bytes per BD.
tx_first_bd:
.long DPRBASE
.long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8
.long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 2
.long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 3
rx_first_bd:
.long DPRBASE + TX_BUFFERS * 8
.long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8
.long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 2
.long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 3
first_buffer:
.long BUFFERS_ADDR
.long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH
.long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 2
.long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 3
// Doorbell bit masks sent to the host.
bell_tx:
.long 1 << DOORBELL_FROM_CARD_TX_0, 1 << DOORBELL_FROM_CARD_TX_1
.long 1 << DOORBELL_FROM_CARD_TX_2, 1 << DOORBELL_FROM_CARD_TX_3
bell_cable:
.long 1 << DOORBELL_FROM_CARD_CABLE_0, 1 << DOORBELL_FROM_CARD_CABLE_1
.long 1 << DOORBELL_FROM_CARD_CABLE_2, 1 << DOORBELL_FROM_CARD_CABLE_3
packet_full:
.long PACKET_FULL, PACKET_FULL + 1, PACKET_FULL + 2, PACKET_FULL + 3
// SICR clock-routing field values/masks, one byte-field per port.
clocking_ext:
.long 0x0000002C, 0x00003E00, 0x002C0000, 0x3E000000
clocking_txfromrx:
.long 0x0000002D, 0x00003F00, 0x002D0000, 0x3F000000
clocking_mask:
.long 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
// Word tables indexed by 4 * port, so only every other word is used.
dcd_mask:
.word 0x020, 0, 0x080, 0, 0x200, 0, 0x800
.ascii "wanXL firmware\n"
.asciz "Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>\n"
/****************************** variables *****************************/
.align 4
channel_stats: .long 0
tx_in: .long 0, 0, 0, 0 // transmitted
tx_out: .long 0, 0, 0, 0 // received from host for transmission
tx_count: .long 0, 0, 0, 0 // currently in transmit queue
rx_in: .long 0, 0, 0, 0 // received from port
rx_out: .long 0 // transmitted to host
parity_bytes: .word 0, 0, 0, 0, 0, 0, 0 // only 4 words are used
csr_output: .word 0
old_csr_output: .word 0, 0, 0, 0, 0, 0, 0
.align 4
firmware_end: // must be dword-aligned
|
AjayMT/mako
| 3,197
|
src/kernel/interrupt.s
|
; interrupt.s
;
; Interrupt handling interface for Mako.
;
; Author: Ajay Tatachar <ajaymt2@illinois.edu>
global enable_interrupts
global disable_interrupts
global interrupt_save_disable
global interrupt_restore
extern forward_interrupt
; Declare an interrupt handler that discards the error code.
; The CPU pushes no error code for these vectors, so a dummy 0 is pushed
; to keep the stack frame identical to error_code_handler's.
%macro no_error_code_handler 1
global interrupt_handler_%1
interrupt_handler_%1:
cli
push dword 0 ; Push 0 as the error code.
push dword %1 ; Push the interrupt number.
jmp common_interrupt_handler
%endmacro
; Declare an interrupt handler that preserves the error code
; (which is already on the stack, pushed by the CPU).
%macro error_code_handler 1
global interrupt_handler_%1
interrupt_handler_%1:
cli
push dword %1 ; Push the interrupt number
jmp common_interrupt_handler
%endmacro
section .text
; Interrupt handlers
; Protected mode exceptions (vectors 0-19); IRQs are remapped to 32-47.
no_error_code_handler 0 ; Divide by zero
no_error_code_handler 1 ; Debug exception
no_error_code_handler 2 ; Non-maskable interrupt
no_error_code_handler 3 ; Breakpoint exception
no_error_code_handler 4 ; "Into detected overflow"
no_error_code_handler 5 ; Out of bounds exception
no_error_code_handler 6 ; Invalid opcode exception
no_error_code_handler 7 ; No coprocessor exception
error_code_handler 8 ; Double fault
no_error_code_handler 9 ; Coprocessor segment overrun
error_code_handler 10 ; Bad TSS
error_code_handler 11 ; Segment not present
error_code_handler 12 ; Stack fault
error_code_handler 13 ; General protection fault
error_code_handler 14 ; Page fault
no_error_code_handler 15 ; Unknown interrupt exception
no_error_code_handler 16 ; Coprocessor fault
error_code_handler 17 ; Alignment check exception
no_error_code_handler 18 ; Machine check exception
no_error_code_handler 19 ; SIMD floating point exception
; IRQs
no_error_code_handler 32
no_error_code_handler 33
no_error_code_handler 34
no_error_code_handler 35
no_error_code_handler 36
no_error_code_handler 37
no_error_code_handler 38
no_error_code_handler 39
no_error_code_handler 40
no_error_code_handler 41
no_error_code_handler 42
no_error_code_handler 43
no_error_code_handler 44
no_error_code_handler 45
no_error_code_handler 46
no_error_code_handler 47
; Common parts of the interrupt handlers.
; Pushes register state to the stack, forwards the interrupt
; to an interrupt_handler_t and restores register state.
; On entry the stack holds: interrupt number, error code (possibly the
; dummy 0), then the CPU-pushed iret frame.
common_interrupt_handler:
pushad ; save all general-purpose registers
mov ax, ds
push eax ; save the interrupted context's data segment
mov ax, 0x10 ; 0x10 — kernel data selector (per this kernel's GDT; confirm against GDT setup)
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
call forward_interrupt ; C dispatcher; reads the saved state from the stack
pop eax ; restore the saved data segment into all segment regs
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
popad
add esp, 8 ; drop interrupt number + error code
sti ; redundant with iret's EFLAGS restore, but harmless (IF change is delayed one insn)
iret
; void enable_interrupts(void) — set IF, allowing maskable interrupts.
enable_interrupts:
sti
ret
; void disable_interrupts(void) — clear IF, masking interrupts.
disable_interrupts:
cli
ret
; uint32_t interrupt_save_disable(void)
; Returns the current EFLAGS in eax and disables interrupts; pair with
; interrupt_restore to re-establish the previous IF state.
interrupt_save_disable:
pushfd
pop eax ; eax = saved EFLAGS
cli
ret
; void interrupt_restore(uint32_t eflags)
; Reloads EFLAGS (including IF) from the value saved by
; interrupt_save_disable, taken from the first stack argument.
interrupt_restore:
mov eax, [esp + 4]
push eax
popfd
ret
|
AjayMT/mako
| 2,599
|
src/kernel/boot.s
|
; boot.s
;
; Kernel loader.
;
; Author: Ajay Tatachar <ajaymt2@illinois.edu>
%include "constants.s"
global loader ; entry symbol for ELF
global kernel_stack ; address of the kernel stack
extern kmain ; C entry point
extern kernel_start ; start address of the kernel exported from link.ld
extern kernel_end ; end address of the kernel exported from link.ld
MOD_ALIGN equ 1 ; align loaded modules on page boundaries
MEMINFO equ 2 ; provide memory map
GRAPHICS equ 4 ; Set graphics mode
FLAGS equ MOD_ALIGN | MEMINFO | GRAPHICS
MAGIC equ 0x1BADB002 ; magic constant for multiboot
CHECKSUM equ -(MAGIC + FLAGS) ; magic + checksum + flags = 0
section .data
align 4096 ; page directory must be page-aligned for cr3
kernel_pd:
; First entry in the page directory is the identity map.
; Identity mapping is necessary so that the instruction pointer
; doesn't point to an invalid address after paging is enabled.
; 10000011b = 0x83: present | writable | 4 MB page (PS bit; 4 MB pages
; are enabled via cr4 in enable_paging).
dd 10000011b
times (KERNEL_PD_IDX - 1) dd 0
; This entry maps the kernel.
dd 10000011b
times (1023 - KERNEL_PD_IDX) dd 0
section .bss
align 4
kernel_stack:
resb KERNEL_STACK_SIZE
section .multiboot
align 4
; Multiboot v1 header: magic, flags, checksum, then the (unused)
; address fields, then the graphics-mode request enabled by GRAPHICS.
dd MAGIC
dd FLAGS
dd CHECKSUM
dd 0 ; header_addr   (unused: MOD_ALIGN/MEMINFO only)
dd 0 ; load_addr
dd 0 ; load_end_addr
dd 0 ; bss_end_addr
dd 0 ; entry_addr
; Request linear graphics mode.
dd 0 ; mode_type: 0 = linear framebuffer
dd 0 ; width: no preference
dd 0 ; height: no preference
dd 32 ; depth: 32 bits per pixel
section .text
loader: ; entry point called by GRUB
; We don't need to do anything in here for now.
; Per the multiboot convention, eax holds the bootloader magic and ebx
; the physical address of the multiboot info struct; both are forwarded
; to kmain below — confirm against the multiboot v1 spec.
enable_paging:
mov ecx, (kernel_pd - KERNEL_START_VADDR) ; physical addr of the PD
and ecx, 0xFFFFF000 ; discard all but the upper 20 bits
mov cr3, ecx ; load pdt
mov ecx, cr4 ; read current config
or ecx, 0x10 ; enable 4MB pages
mov cr4, ecx ; write config
mov ecx, cr0 ; read current config
or ecx, 0x80010000 ; enable paging and write protection
mov cr0, ecx ; write config
lea ecx, [higher_half]
jmp ecx ; absolute jump to the higher half
higher_half: ; at this point we are using the page table
mov [kernel_pd], DWORD 0 ; stop identity mapping the first 4 MB
invlpg [0] ; flush TLB
fwd_kmain:
; Set up the kernel stack and call kmain(multiboot_info, magic,
; kernel_pd, kernel_start, kernel_end) — cdecl, args pushed right-to-left.
mov esp, kernel_stack + KERNEL_STACK_SIZE
push kernel_end
push kernel_start
push kernel_pd
push eax ; multiboot magic
add ebx, KERNEL_START_VADDR ; translate info ptr to the higher half
push ebx ; multiboot info (virtual address)
call kmain
jmp $ ; loop indefinitely
|
AjayMT/mako
| 1,029
|
src/kernel/io.s
|
; io.s
;
; Serial port I/O interface for Mako.
;
; Author: Ajay Tatachar <ajaymt2@illinois.edu>
global outb
global outw
global outl
global inb
global inw
global inl
section .text
; outb -- write one byte to an I/O port.
; cdecl stack layout:
;   [esp + 8] value (low byte used)
;   [esp + 4] I/O port number
;   [esp    ] return address
outb:
mov dx, [esp + 4] ; dx = target port
mov al, [esp + 8] ; al = byte to emit
out dx, al
ret
; inb -- get a byte from an I/O port
; stack: [esp + 4] the address of the I/O port
; [esp ] the return address
; Returns the byte in al (cdecl return register).
inb:
mov dx, [esp + 4]
in al, dx
ret
; outw -- write a 16-bit WORD to an I/O port.
; cdecl: [esp + 4] = port, [esp + 8] = value (low word used).
outw:
mov dx, [esp + 4] ; dx = target port
mov ax, [esp + 8] ; ax = word to emit
out dx, ax
ret
; inw -- get a WORD from an I/O port.
; cdecl: [esp + 4] = port; returns the word in ax.
inw:
mov dx, [esp + 4]
in ax, dx
ret
; outl -- write a 32-bit DWORD to an I/O port.
; cdecl: [esp + 4] = port, [esp + 8] = value.
outl:
mov dx, [esp + 4] ; dx = target port
mov eax, [esp + 8] ; eax = dword to emit
out dx, eax
ret
; inl -- get a DWORD from an I/O port.
; cdecl: [esp + 4] = port; returns the dword in eax.
inl:
mov dx, [esp + 4]
in eax, dx
ret
|
AjayMT/mako
| 1,342
|
src/kernel/process.s
|
; process.s
;
; Process management and user mode.
;
; Author: Ajay Tatachar <ajaymt2@illinois.edu>
global resume_user
global resume_kernel
section .text
; void resume_user(registers *r)
; Restore a saved user-mode context and return to it via iret.
; Register struct layout (byte offsets, from the loads below):
;   0 eax, 4 ebx, 8 ecx, 12 edx, 16 ebp, 20 esi, 24 edi,
;   28 ss/data selector, 32 esp, 36 eflags, 40 cs, 44 eip.
; Does not return.
resume_user:
cli
mov eax, [esp + 4] ; store address of registers struct in eax
; restore segment selectors except for cs and ss
; (flat memory model assumed — the struct is still addressed through
; the new ds afterwards; TODO confirm all segments share one base)
mov cx, [eax + 28]
mov ds, cx
mov gs, cx
mov es, cx
mov fs, cx
; pushing stuff for iret (inter-privilege frame: ss/esp included)
push dword [eax + 28] ; ss
push dword [eax + 32] ; esp
push dword [eax + 36] ; eflags
push dword [eax + 40] ; segment selector (cs)
push dword [eax + 44] ; eip
; restore registers (eax last, since it holds the struct pointer)
mov ebx, [eax + 4]
mov ecx, [eax + 8]
mov edx, [eax + 12]
mov ebp, [eax + 16]
mov esi, [eax + 20]
mov edi, [eax + 24]
mov eax, [eax]
iret
; void resume_kernel(registers *r)
; Restore a saved kernel-mode context: same struct layout as
; resume_user, but segments are left alone and esp is restored directly
; (same-privilege iret frame has no ss/esp slots).  Does not return.
resume_kernel:
cli
mov eax, [esp + 4] ; store address of registers struct in eax
mov esp, [eax + 32] ; restore esp
; pushing stuff for iret
push dword [eax + 36] ; eflags
push dword [eax + 40] ; segment selector (cs)
push dword [eax + 44] ; eip
; restore registers (eax last, since it holds the struct pointer)
mov ebx, [eax + 4]
mov ecx, [eax + 8]
mov edx, [eax + 12]
mov ebp, [eax + 16]
mov esi, [eax + 20]
mov edi, [eax + 24]
mov eax, [eax]
iret
|
AjayMT/mako
| 1,059
|
src/libc/setjmp.s
|
; setjmp.s
;
; Non-local jumping.
;
; Taken from ToAruOS <http://github.com/klange/toaruos>
global setjmp
global longjmp
section .text
; int setjmp(jmp_buf env)
; Save the caller's registers, stack pointer and return address into
; env and return 0.  jmp_buf layout (byte offsets):
;   0 eax, 4 ebx, 8 ecx, 12 edx, 16 esi, 20 edi, 24 ebp,
;   28 esp (as it will be after this call returns), 32 return eip.
setjmp:
push ebp
mov ebp, esp
push edi
mov edi, [ebp + 8] ; edi points to jmp_buf
; save some registers
mov [edi], eax
mov [edi + 4], ebx
mov [edi + 8], ecx
mov [edi + 12], edx
mov [edi + 16], esi
; save edi (the caller's value, spilled above at [ebp - 4])
mov eax, [ebp - 4]
mov [edi + 20], eax
; save ebp (the caller's value, saved by the prologue at [ebp])
mov eax, [ebp]
mov [edi + 24], eax
; save esp as it will be after our epilogue + ret pops
; saved-edi, saved-ebp and the return address (3 * 4 = 12 bytes)
mov eax, esp
add eax, 12
mov [edi + 28], eax
; save the return address
mov eax, [ebp + 4]
mov [edi + 32], eax
pop edi
mov eax, 0 ; direct invocation of setjmp returns 0
leave
ret
; void longjmp(jmp_buf env, int val)
; Restore the context saved by setjmp so that setjmp appears to return
; again, this time with value val (coerced to 1 if val == 0, per the C
; standard).  Does not return to its own caller.
longjmp:
push ebp
mov ebp, esp
mov edi, [ebp + 8] ; edi = jmp_buf
mov eax, [ebp + 12] ; eax = val
test eax, eax
jne .zero
inc eax ; val == 0 -> return 1 instead
.zero:
mov [edi], eax ; stash the return value in the saved-eax slot
mov ebp, [edi + 24] ; restore caller's ebp
mov esp, [edi + 28] ; restore caller's esp
push dword [edi + 32] ; re-push the saved return address for ret
; restore the remaining registers (edi last, it held the jmp_buf)
mov eax, [edi]
mov ebx, [edi + 4]
mov ecx, [edi + 8]
mov edx, [edi + 12]
mov esi, [edi + 16]
mov edi, [edi + 20]
ret ; "returns" from the original setjmp call
|
aixcc-public/challenge-001-exemplar-source
| 3,174
|
drivers/scsi/arm/acornscsi-io.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/drivers/acorn/scsi/acornscsi-io.S: Acorn SCSI card IO
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#if defined(__APCS_32__)
#define LOADREGS(t,r,l...) ldm##t r, l
#elif defined(__APCS_26__)
#define LOADREGS(t,r,l...) ldm##t r, l##^
#endif
@ Purpose: transfer a block of data from the acorn scsi card to memory
@ Proto : void acornscsi_in(unsigned int addr_start, char *buffer, int length)
@ Returns: nothing
@ Each 32-bit word read from the card carries 16 valid data bits in its
@ low half (lr = 0x0000ffff mask); pairs of card words are packed into
@ one 32-bit memory word.  Unrolled 16-byte main loop with 8/4/2-byte
@ tails.  r0 = card address (word-aligned), r1 = dest, r2 = byte count.
.align
ENTRY(__acornscsi_in)
stmfd sp!, {r4 - r7, lr}
bic r0, r0, #3 @ word-align the card address
mov lr, #0xff @ lr = 0x0000ffff data mask
orr lr, lr, #0xff00
acornscsi_in16lp:
subs r2, r2, #16
bmi acornscsi_in8 @ fewer than 16 bytes left
ldmia r0!, {r3, r4, r5, r6} @ 4 card words = 8 data bytes
and r3, r3, lr
orr r3, r3, r4, lsl #16 @ pack two halfwords into r3
and r4, r5, lr
orr r4, r4, r6, lsl #16
ldmia r0!, {r5, r6, r7, ip} @ 4 more card words
and r5, r5, lr
orr r5, r5, r6, lsl #16
and r6, r7, lr
orr r6, r6, ip, lsl #16
stmia r1!, {r3 - r6} @ store 16 packed bytes
bne acornscsi_in16lp
LOADREGS(fd, sp!, {r4 - r7, pc}) @ count hit exactly 0: done
acornscsi_in8: adds r2, r2, #8
bmi acornscsi_in4
ldmia r0!, {r3, r4, r5, r6}
and r3, r3, lr
orr r3, r3, r4, lsl #16
and r4, r5, lr
orr r4, r4, r6, lsl #16
stmia r1!, {r3 - r4}
LOADREGS(eqfd, sp!, {r4 - r7, pc}) @ return if remainder was exactly 8
sub r2, r2, #8
acornscsi_in4: adds r2, r2, #4
bmi acornscsi_in2
ldmia r0!, {r3, r4}
and r3, r3, lr
orr r3, r3, r4, lsl #16
str r3, [r1], #4
LOADREGS(eqfd, sp!, {r4 - r7, pc}) @ return if remainder was exactly 4
sub r2, r2, #4
acornscsi_in2: adds r2, r2, #2
ldr r3, [r0], #4 @ last card word: 1 or 2 bytes
and r3, r3, lr
strb r3, [r1], #1
mov r3, r3, lsr #8
strplb r3, [r1], #1 @ second byte only if count was >= 2
LOADREGS(fd, sp!, {r4 - r7, pc})
@ Purpose: transfer a block of data from memory to the acorn scsi card
@ Proto : void acornscsi_out(unsigned int addr_start, char *buffer, int length)
@ Returns: nothing
@ Inverse of __acornscsi_in: each 32-bit memory word is split into two
@ card words whose halfwords are duplicated (lsl/lsr #16 + orr), since
@ the card latches 16 data bits per word.  r0 = card address, r1 = src,
@ r2 = byte count.
ENTRY(__acornscsi_out)
stmfd sp!, {r4 - r6, lr}
bic r0, r0, #3 @ word-align the card address
acornscsi_out16lp:
subs r2, r2, #16
bmi acornscsi_out8 @ fewer than 16 bytes left
ldmia r1!, {r4, r6, ip, lr} @ 16 bytes from memory
mov r3, r4, lsl #16 @ duplicate low halfword of r4
orr r3, r3, r3, lsr #16
mov r4, r4, lsr #16 @ duplicate high halfword of r4
orr r4, r4, r4, lsl #16
mov r5, r6, lsl #16
orr r5, r5, r5, lsr #16
mov r6, r6, lsr #16
orr r6, r6, r6, lsl #16
stmia r0!, {r3, r4, r5, r6} @ first 8 bytes as 4 card words
mov r3, ip, lsl #16
orr r3, r3, r3, lsr #16
mov r4, ip, lsr #16
orr r4, r4, r4, lsl #16
mov ip, lr, lsl #16
orr ip, ip, ip, lsr #16
mov lr, lr, lsr #16
orr lr, lr, lr, lsl #16
stmia r0!, {r3, r4, ip, lr} @ second 8 bytes
bne acornscsi_out16lp
LOADREGS(fd, sp!, {r4 - r6, pc}) @ count hit exactly 0: done
acornscsi_out8: adds r2, r2, #8
bmi acornscsi_out4
ldmia r1!, {r4, r6}
mov r3, r4, lsl #16
orr r3, r3, r3, lsr #16
mov r4, r4, lsr #16
orr r4, r4, r4, lsl #16
mov r5, r6, lsl #16
orr r5, r5, r5, lsr #16
mov r6, r6, lsr #16
orr r6, r6, r6, lsl #16
stmia r0!, {r3, r4, r5, r6}
LOADREGS(eqfd, sp!, {r4 - r6, pc}) @ return if remainder was exactly 8
sub r2, r2, #8
acornscsi_out4: adds r2, r2, #4
bmi acornscsi_out2
ldr r4, [r1], #4
mov r3, r4, lsl #16
orr r3, r3, r3, lsr #16
mov r4, r4, lsr #16
orr r4, r4, r4, lsl #16
stmia r0!, {r3, r4}
LOADREGS(eqfd, sp!, {r4 - r6, pc}) @ return if remainder was exactly 4
sub r2, r2, #4
acornscsi_out2: adds r2, r2, #2
ldr r3, [r1], #2 @ last 1 or 2 bytes
strb r3, [r0], #1
mov r3, r3, lsr #8
strplb r3, [r0], #1 @ second byte only if count was >= 2
LOADREGS(fd, sp!, {r4 - r6, pc})
|
aixcc-public/challenge-001-exemplar-source
| 3,303
|
drivers/firmware/efi/libstub/zboot-header.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/pe.h>
#ifdef CONFIG_64BIT
.set .Lextra_characteristics, 0x0
.set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS
#else
.set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE
.set .Lpe_opt_magic, PE_OPT_MAGIC_PE32
#endif
.section ".head", "a"
.globl __efistub_efi_zboot_header
__efistub_efi_zboot_header:
.Ldoshdr:
.long MZ_MAGIC
.ascii "zimg" // image type
.long __efistub__gzdata_start - .Ldoshdr // payload offset
.long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
.long 0, 0 // reserved
.asciz COMP_TYPE // compression type
.org .Ldoshdr + 0x3c
.long .Lpehdr - .Ldoshdr // PE header offset
.Lpehdr:
.long PE_MAGIC
.short MACHINE_TYPE
.short .Lsection_count
.long 0
.long 0
.long 0
.short .Lsection_table - .Loptional_header
.short IMAGE_FILE_DEBUG_STRIPPED | \
IMAGE_FILE_EXECUTABLE_IMAGE | \
IMAGE_FILE_LINE_NUMS_STRIPPED |\
.Lextra_characteristics
.Loptional_header:
.short .Lpe_opt_magic
.byte 0, 0
.long _etext - .Lefi_header_end
.long __data_size
.long 0
.long __efistub_efi_zboot_entry - .Ldoshdr
.long .Lefi_header_end - .Ldoshdr
#ifdef CONFIG_64BIT
.quad 0
#else
.long _etext - .Ldoshdr, 0x0
#endif
.long 4096
.long 512
.short 0, 0
.short LINUX_EFISTUB_MAJOR_VERSION // MajorImageVersion
.short LINUX_EFISTUB_MINOR_VERSION // MinorImageVersion
.short 0, 0
.long 0
.long _end - .Ldoshdr
.long .Lefi_header_end - .Ldoshdr
.long 0
.short IMAGE_SUBSYSTEM_EFI_APPLICATION
.short 0
#ifdef CONFIG_64BIT
.quad 0, 0, 0, 0
#else
.long 0, 0, 0, 0
#endif
.long 0
.long (.Lsection_table - .) / 8
.quad 0 // ExportTable
.quad 0 // ImportTable
.quad 0 // ResourceTable
.quad 0 // ExceptionTable
.quad 0 // CertificationTable
.quad 0 // BaseRelocationTable
#ifdef CONFIG_DEBUG_EFI
.long .Lefi_debug_table - .Ldoshdr // DebugTable
.long .Lefi_debug_table_size
#endif
.Lsection_table:
.ascii ".text\0\0\0"
.long _etext - .Lefi_header_end
.long .Lefi_header_end - .Ldoshdr
.long _etext - .Lefi_header_end
.long .Lefi_header_end - .Ldoshdr
.long 0, 0
.short 0, 0
.long IMAGE_SCN_CNT_CODE | \
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_EXECUTE
.ascii ".data\0\0\0"
.long __data_size
.long _etext - .Ldoshdr
.long __data_rawsize
.long _etext - .Ldoshdr
.long 0, 0
.short 0, 0
.long IMAGE_SCN_CNT_INITIALIZED_DATA | \
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_WRITE
.set .Lsection_count, (. - .Lsection_table) / 40
#ifdef CONFIG_DEBUG_EFI
.section ".rodata", "a"
.align 2
.Lefi_debug_table:
// EFI_IMAGE_DEBUG_DIRECTORY_ENTRY
.long 0 // Characteristics
.long 0 // TimeDateStamp
.short 0 // MajorVersion
.short 0 // MinorVersion
.long IMAGE_DEBUG_TYPE_CODEVIEW // Type
.long .Lefi_debug_entry_size // SizeOfData
.long 0 // RVA
.long .Lefi_debug_entry - .Ldoshdr // FileOffset
.set .Lefi_debug_table_size, . - .Lefi_debug_table
.previous
.Lefi_debug_entry:
// EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
.ascii "NB10" // Signature
.long 0 // Unknown
.long 0 // Unknown2
.long 0 // Unknown3
.asciz ZBOOT_EFI_PATH
.set .Lefi_debug_entry_size, . - .Lefi_debug_entry
#endif
.p2align 12
.Lefi_header_end:
|
aixcc-public/challenge-001-exemplar-source
| 1,282
|
tools/edid/1680x1050.S
|
/*
1680x1050.S: EDID data set for standard 1680x1050 60 Hz monitor
Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 146250 /* kHz */
#define XPIX 1680
#define YPIX 1050
#define XY_RATIO XY_RATIO_16_10
#define XBLANK 560
#define YBLANK 39
#define XOFFSET 104
#define XPULSE 176
#define YOFFSET 3
#define YPULSE 6
#define DPI 96
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux WSXGA"
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1
#define VSYNC_POL 1
#include "edid.S"
|
aixcc-public/challenge-001-exemplar-source
| 1,128
|
tools/edid/800x600.S
|
/*
800x600.S: EDID data set for standard 800x600 60 Hz monitor
Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
Copyright (C) 2014 Linaro Limited
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
/*
 * Mode-specific parameters consumed by the shared EDID template,
 * edid.S, which is included at the bottom of this file and lays out
 * the complete EDID 1.3 data block from these macros.
 */
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 40000 /* kHz */
/* Active pixels per line / active lines per frame */
#define XPIX 800
#define YPIX 600
#define XY_RATIO XY_RATIO_4_3
/* Blanking widths, sync offsets (from blanking start) and pulse widths */
#define XBLANK 256
#define YBLANK 28
#define XOFFSET 40
#define XPULSE 128
#define YOFFSET 1
#define YPULSE 4
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux SVGA"
#define ESTABLISHED_TIMING1_BITS 0x01 /* Bit 0: 800x600 @ 60Hz */
/* Positive horizontal and vertical sync polarity */
#define HSYNC_POL 1
#define VSYNC_POL 1
#include "edid.S"
|
aixcc-public/challenge-001-exemplar-source
| 1,278
|
tools/edid/1600x1200.S
|
/*
1600x1200.S: EDID data set for standard 1600x1200 60 Hz monitor
Copyright (C) 2013 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
 * Mode-specific parameters consumed by the shared EDID template,
 * edid.S, which is included at the bottom of this file and lays out
 * the complete EDID 1.3 data block from these macros.
 */
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 162000 /* kHz */
/* Active pixels per line / active lines per frame */
#define XPIX 1600
#define YPIX 1200
#define XY_RATIO XY_RATIO_4_3
/* Blanking widths, sync offsets (from blanking start) and pulse widths */
#define XBLANK 560
#define YBLANK 50
#define XOFFSET 64
#define XPULSE 192
#define YOFFSET 1
#define YPULSE 3
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux UXGA"
/* No ESTABLISHED_TIMINGx_BITS */
/* Positive horizontal and vertical sync polarity */
#define HSYNC_POL 1
#define VSYNC_POL 1
#include "edid.S"
|
aixcc-public/challenge-001-exemplar-source
| 1,278
|
tools/edid/1280x1024.S
|
/*
1280x1024.S: EDID data set for standard 1280x1024 60 Hz monitor
Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
 * Mode-specific parameters consumed by the shared EDID template,
 * edid.S, which is included at the bottom of this file and lays out
 * the complete EDID 1.3 data block from these macros.
 */
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 108000 /* kHz */
/* Active pixels per line / active lines per frame */
#define XPIX 1280
#define YPIX 1024
#define XY_RATIO XY_RATIO_5_4
/* Blanking widths, sync offsets (from blanking start) and pulse widths */
#define XBLANK 408
#define YBLANK 42
#define XOFFSET 48
#define XPULSE 112
#define YOFFSET 1
#define YPULSE 3
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux SXGA"
/* No ESTABLISHED_TIMINGx_BITS */
/* Positive horizontal and vertical sync polarity */
#define HSYNC_POL 1
#define VSYNC_POL 1
#include "edid.S"
|
aixcc-public/challenge-001-exemplar-source
| 9,772
|
tools/edid/edid.S
|
/*
edid.S: EDID data template
Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
 * Generic EDID 1.3 data-block template.  A per-mode wrapper file
 * (e.g. 1024x768.S) defines CLOCK/XPIX/YPIX/... and then #includes
 * this file, which emits the complete EDID structure into .data.
 * The trailing CRC byte is a macro provided externally — presumably
 * computed by the build so all 128 bytes sum to 0 mod 256 (see the
 * "checksum" entry below); TODO confirm against the edid Makefile.
 */
/* Manufacturer */
#define MFG_LNX1 'L'
#define MFG_LNX2 'N'
#define MFG_LNX3 'X'
#define SERIAL 0
#define YEAR 2012
#define WEEK 5
/* EDID 1.3 standard definitions */
#define XY_RATIO_16_10 0b00
#define XY_RATIO_4_3 0b01
#define XY_RATIO_5_4 0b10
#define XY_RATIO_16_9 0b11
/* Provide defaults for the timing bits */
#ifndef ESTABLISHED_TIMING1_BITS
#define ESTABLISHED_TIMING1_BITS 0x00
#endif
#ifndef ESTABLISHED_TIMING2_BITS
#define ESTABLISHED_TIMING2_BITS 0x00
#endif
#ifndef ESTABLISHED_TIMING3_BITS
#define ESTABLISHED_TIMING3_BITS 0x00
#endif
/* Pack three letters ('A'-'Z', 5 bits each) into a 16-bit manufacturer ID */
#define mfgname2id(v1,v2,v3) \
((((v1-'@')&0x1f)<<10)+(((v2-'@')&0x1f)<<5)+((v3-'@')&0x1f))
/* Swap the two bytes of a 16-bit value (the mfg ID is stored big-endian) */
#define swap16(v1) ((v1>>8)+((v1&0xff)<<8))
/* Pack two 4-bit values into one byte: v1 in the high nibble, v2 in the low */
#define lsbs2(v1,v2) (((v1&0x0f)<<4)+(v2&0x0f))
/* Pack bits 11:8 of two values into one byte: v1's msbits high, v2's low */
#define msbs2(v1,v2) ((((v1>>8)&0x0f)<<4)+((v2>>8)&0x0f))
/* Pack four 2-bit msbit fields: v1/v2 are 10-bit values (take bits 9:8),
   v3/v4 are 6-bit values (take bits 5:4) */
#define msbs4(v1,v2,v3,v4) \
((((v1>>8)&0x03)<<6)+(((v2>>8)&0x03)<<4)+\
(((v3>>4)&0x03)<<2)+((v4>>4)&0x03))
/* Pixels at a given DPI to millimetres (25 approximates 25.4 mm/inch) */
#define pixdpi2mm(pix,dpi) ((pix*25)/dpi)
#define xsize pixdpi2mm(XPIX,DPI)
#define ysize pixdpi2mm(YPIX,DPI)
.data
/* Fixed header pattern */
header: .byte 0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x00
mfg_id: .hword swap16(mfgname2id(MFG_LNX1, MFG_LNX2, MFG_LNX3))
prod_code: .hword 0
/* Serial number. 32 bits, little endian. */
serial_number: .long SERIAL
/* Week of manufacture */
week: .byte WEEK
/* Year of manufacture, less 1990. (1990-2245)
If week=255, it is the model year instead */
year: .byte YEAR-1990
version: .byte VERSION /* EDID version, usually 1 (for 1.3) */
revision: .byte REVISION /* EDID revision, usually 3 (for 1.3) */
/* If Bit 7=1 Digital input. If set, the following bit definitions apply:
Bits 6-1 Reserved, must be 0
Bit 0 Signal is compatible with VESA DFP 1.x TMDS CRGB,
1 pixel per clock, up to 8 bits per color, MSB aligned,
If Bit 7=0 Analog input. If clear, the following bit definitions apply:
Bits 6-5 Video white and sync levels, relative to blank
00=+0.7/-0.3 V; 01=+0.714/-0.286 V;
10=+1.0/-0.4 V; 11=+0.7/0 V
Bit 4 Blank-to-black setup (pedestal) expected
Bit 3 Separate sync supported
Bit 2 Composite sync (on HSync) supported
Bit 1 Sync on green supported
Bit 0 VSync pulse must be serrated when composite or
sync-on-green is used. */
video_parms: .byte 0x6d
/* Maximum horizontal image size, in centimetres
(max 292 cm/115 in at 16:9 aspect ratio) */
max_hor_size: .byte xsize/10
/* Maximum vertical image size, in centimetres.
If either byte is 0, undefined (e.g. projector) */
max_vert_size: .byte ysize/10
/* Display gamma, minus 1, times 100 (range 1.00-3.54); 120 -> gamma 2.2 */
gamma: .byte 120
/* Bit 7 DPMS standby supported
Bit 6 DPMS suspend supported
Bit 5 DPMS active-off supported
Bits 4-3 Display type: 00=monochrome; 01=RGB colour;
10=non-RGB multicolour; 11=undefined
Bit 2 Standard sRGB colour space. Bytes 25-34 must contain
sRGB standard values.
Bit 1 Preferred timing mode specified in descriptor block 1.
Bit 0 GTF supported with default parameter values. */
dsp_features: .byte 0xea
/* Chromaticity coordinates. */
/* Red and green least-significant bits
Bits 7-6 Red x value least-significant 2 bits
Bits 5-4 Red y value least-significant 2 bits
Bits 3-2 Green x value least-significant 2 bits
Bits 1-0 Green y value least-significant 2 bits */
red_green_lsb: .byte 0x5e
/* Blue and white least-significant 2 bits */
blue_white_lsb: .byte 0xc0
/* Red x value most significant 8 bits.
0-255 encodes 0-0.996 (255/256); 0-0.999 (1023/1024) with lsbits */
red_x_msb: .byte 0xa4
/* Red y value most significant 8 bits */
red_y_msb: .byte 0x59
/* Green x and y value most significant 8 bits */
green_x_y_msb: .byte 0x4a,0x98
/* Blue x and y value most significant 8 bits */
blue_x_y_msb: .byte 0x25,0x20
/* Default white point x and y value most significant 8 bits */
white_x_y_msb: .byte 0x50,0x54
/* Established timings */
/* Bit 7 720x400 @ 70 Hz
Bit 6 720x400 @ 88 Hz
Bit 5 640x480 @ 60 Hz
Bit 4 640x480 @ 67 Hz
Bit 3 640x480 @ 72 Hz
Bit 2 640x480 @ 75 Hz
Bit 1 800x600 @ 56 Hz
Bit 0 800x600 @ 60 Hz */
estbl_timing1: .byte ESTABLISHED_TIMING1_BITS
/* Bit 7 800x600 @ 72 Hz
Bit 6 800x600 @ 75 Hz
Bit 5 832x624 @ 75 Hz
Bit 4 1024x768 @ 87 Hz, interlaced (1024x768)
Bit 3 1024x768 @ 60 Hz
Bit 2 1024x768 @ 72 Hz
Bit 1 1024x768 @ 75 Hz
Bit 0 1280x1024 @ 75 Hz */
estbl_timing2: .byte ESTABLISHED_TIMING2_BITS
/* Bit 7 1152x870 @ 75 Hz (Apple Macintosh II)
Bits 6-0 Other manufacturer-specific display modes */
estbl_timing3: .byte ESTABLISHED_TIMING3_BITS
/* Standard timing */
/* X resolution, less 31, divided by 8 (256-2288 pixels) */
std_xres: .byte (XPIX/8)-31
/* Y resolution, X:Y pixel ratio
Bits 7-6 X:Y pixel ratio: 00=16:10; 01=4:3; 10=5:4; 11=16:9.
Bits 5-0 Vertical frequency, less 60 (60-123 Hz) */
std_vres: .byte (XY_RATIO<<6)+VFREQ-60
.fill 7,2,0x0101 /* Unused standard-timing slots */
descriptor1:
/* Pixel clock in 10 kHz units. (0.01-655.35 MHz, little-endian) */
clock: .hword CLOCK/10
/* Horizontal active pixels 8 lsbits (0-4095) */
x_act_lsb: .byte XPIX&0xff
/* Horizontal blanking pixels 8 lsbits (0-4095)
End of active to start of next active. */
x_blk_lsb: .byte XBLANK&0xff
/* Bits 7-4 Horizontal active pixels 4 msbits
Bits 3-0 Horizontal blanking pixels 4 msbits */
x_msbs: .byte msbs2(XPIX,XBLANK)
/* Vertical active lines 8 lsbits (0-4095) */
y_act_lsb: .byte YPIX&0xff
/* Vertical blanking lines 8 lsbits (0-4095) */
y_blk_lsb: .byte YBLANK&0xff
/* Bits 7-4 Vertical active lines 4 msbits
Bits 3-0 Vertical blanking lines 4 msbits */
y_msbs: .byte msbs2(YPIX,YBLANK)
/* Horizontal sync offset pixels 8 lsbits (0-1023) From blanking start */
x_snc_off_lsb: .byte XOFFSET&0xff
/* Horizontal sync pulse width pixels 8 lsbits (0-1023) */
x_snc_pls_lsb: .byte XPULSE&0xff
/* Bits 7-4 Vertical sync offset lines 4 lsbits (0-63)
Bits 3-0 Vertical sync pulse width lines 4 lsbits (0-63) */
y_snc_lsb: .byte lsbs2(YOFFSET, YPULSE)
/* Bits 7-6 Horizontal sync offset pixels 2 msbits
Bits 5-4 Horizontal sync pulse width pixels 2 msbits
Bits 3-2 Vertical sync offset lines 2 msbits
Bits 1-0 Vertical sync pulse width lines 2 msbits */
xy_snc_msbs: .byte msbs4(XOFFSET,XPULSE,YOFFSET,YPULSE)
/* Horizontal display size, mm, 8 lsbits (0-4095 mm, 161 in) */
x_dsp_size: .byte xsize&0xff
/* Vertical display size, mm, 8 lsbits (0-4095 mm, 161 in) */
y_dsp_size: .byte ysize&0xff
/* Bits 7-4 Horizontal display size, mm, 4 msbits
Bits 3-0 Vertical display size, mm, 4 msbits */
dsp_size_mbsb: .byte msbs2(xsize,ysize)
/* Horizontal border pixels (each side; total is twice this) */
x_border: .byte 0
/* Vertical border lines (each side; total is twice this) */
y_border: .byte 0
/* Bit 7 Interlaced
Bits 6-5 Stereo mode: 00=No stereo; other values depend on bit 0:
Bit 0=0: 01=Field sequential, sync=1 during right; 10=similar,
sync=1 during left; 11=4-way interleaved stereo
Bit 0=1 2-way interleaved stereo: 01=Right image on even lines;
10=Left image on even lines; 11=side-by-side
Bits 4-3 Sync type: 00=Analog composite; 01=Bipolar analog composite;
10=Digital composite (on HSync); 11=Digital separate
Bit 2 If digital separate: Vertical sync polarity (1=positive)
Other types: VSync serrated (HSync during VSync)
Bit 1 If analog sync: Sync on all 3 RGB lines (else green only)
Digital: HSync polarity (1=positive)
Bit 0 2-way line-interleaved stereo, if bits 4-3 are not 00. */
features: .byte 0x18+(VSYNC_POL<<2)+(HSYNC_POL<<1)
descriptor2: .byte 0,0 /* Not a detailed timing descriptor */
.byte 0 /* Must be zero */
.byte 0xff /* Descriptor is monitor serial number (text) */
.byte 0 /* Must be zero */
start1: .ascii "Linux #0"
end1: .byte 0x0a /* End marker */
.fill 12-(end1-start1), 1, 0x20 /* Padded spaces */
descriptor3: .byte 0,0 /* Not a detailed timing descriptor */
.byte 0 /* Must be zero */
.byte 0xfd /* Descriptor is monitor range limits */
.byte 0 /* Must be zero */
start2: .byte VFREQ-1 /* Minimum vertical field rate (1-255 Hz) */
.byte VFREQ+1 /* Maximum vertical field rate (1-255 Hz) */
.byte (CLOCK/(XPIX+XBLANK))-1 /* Minimum horizontal line rate
(1-255 kHz) */
.byte (CLOCK/(XPIX+XBLANK))+1 /* Maximum horizontal line rate
(1-255 kHz) */
.byte (CLOCK/10000)+1 /* Maximum pixel clock rate, rounded up
to 10 MHz multiple (10-2550 MHz) */
.byte 0 /* No extended timing information type */
end2: .byte 0x0a /* End marker */
.fill 12-(end2-start2), 1, 0x20 /* Padded spaces */
descriptor4: .byte 0,0 /* Not a detailed timing descriptor */
.byte 0 /* Must be zero */
.byte 0xfc /* Descriptor is text */
.byte 0 /* Must be zero */
start3: .ascii TIMING_NAME
end3: .byte 0x0a /* End marker */
.fill 12-(end3-start3), 1, 0x20 /* Padded spaces */
extensions: .byte 0 /* Number of extensions to follow */
checksum: .byte CRC /* Sum of all bytes must be 0 */
|
aixcc-public/challenge-001-exemplar-source
| 1,307
|
tools/edid/1024x768.S
|
/*
1024x768.S: EDID data set for standard 1024x768 60 Hz monitor
Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
 * Mode-specific parameters consumed by the shared EDID template,
 * edid.S, which is included at the bottom of this file and lays out
 * the complete EDID 1.3 data block from these macros.
 */
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 65000 /* kHz */
/* Active pixels per line / active lines per frame */
#define XPIX 1024
#define YPIX 768
#define XY_RATIO XY_RATIO_4_3
/* Blanking widths, sync offsets (from blanking start) and pulse widths */
#define XBLANK 320
#define YBLANK 38
#define XOFFSET 8
#define XPULSE 144
#define YOFFSET 3
#define YPULSE 6
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux XGA"
#define ESTABLISHED_TIMING2_BITS 0x08 /* Bit 3 -> 1024x768 @60 Hz */
/* Negative horizontal and vertical sync polarity */
#define HSYNC_POL 0
#define VSYNC_POL 0
#include "edid.S"
|
aixcc-public/challenge-001-exemplar-source
| 1,277
|
tools/edid/1920x1080.S
|
/*
1920x1080.S: EDID data set for standard 1920x1080 60 Hz monitor
Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
 * Mode-specific parameters consumed by the shared EDID template,
 * edid.S, which is included at the bottom of this file and lays out
 * the complete EDID 1.3 data block from these macros.
 */
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 148500 /* kHz */
/* Active pixels per line / active lines per frame */
#define XPIX 1920
#define YPIX 1080
#define XY_RATIO XY_RATIO_16_9
/* Blanking widths, sync offsets (from blanking start) and pulse widths */
#define XBLANK 280
#define YBLANK 45
#define XOFFSET 88
#define XPULSE 44
#define YOFFSET 4
#define YPULSE 5
#define DPI 96
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux FHD"
/* No ESTABLISHED_TIMINGx_BITS */
/* Positive horizontal and vertical sync polarity */
#define HSYNC_POL 1
#define VSYNC_POL 1
#include "edid.S"
|
aixcc-public/challenge-001-exemplar-source
| 1,556
|
tools/perf/arch/arm/tests/regs_load.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
/* Byte offsets of each saved register within the u64 regs[] buffer */
#define R0 0x00
#define R1 0x08
#define R2 0x10
#define R3 0x18
#define R4 0x20
#define R5 0x28
#define R6 0x30
#define R7 0x38
#define R8 0x40
#define R9 0x48
#define SL 0x50
#define FP 0x58
#define IP 0x60
#define SP 0x68
#define LR 0x70
#define PC 0x78
/*
 * Implementation of void perf_regs_load(u64 *regs);
 *
 * This functions fills in the 'regs' buffer from the actual registers values,
 * in the way the perf built-in unwinding test expects them:
 * - the PC at the time at the call to this function. Since this function
 * is called using a bl instruction, the PC value is taken from LR.
 * The built-in unwinding test then unwinds the call stack from the dwarf
 * information in unwind__get_entries.
 *
 * Notes:
 * - the 8 bytes stride in the registers offsets comes from the fact
 * that the registers are stored in an u64 array (u64 *regs),
 * - the regs buffer needs to be zeroed before the call to this function,
 * in this case using a calloc in dwarf-unwind.c.
 */
.text
.type perf_regs_load,%function
SYM_FUNC_START(perf_regs_load)
// r0 is the regs buffer pointer (the function's only argument), so the
// value recorded for R0 is the buffer address itself.
str r0, [r0, #R0]
str r1, [r0, #R1]
str r2, [r0, #R2]
str r3, [r0, #R3]
str r4, [r0, #R4]
str r5, [r0, #R5]
str r6, [r0, #R6]
str r7, [r0, #R7]
str r8, [r0, #R8]
str r9, [r0, #R9]
str sl, [r0, #SL]
str fp, [r0, #FP]
str ip, [r0, #IP]
str sp, [r0, #SP]
str lr, [r0, #LR]
str lr, [r0, #PC] // store pc as lr in order to skip the call
// to this function
// Return; lr still holds the caller's address.
mov pc, lr
SYM_FUNC_END(perf_regs_load)
|
aixcc-public/challenge-001-exemplar-source
| 1,937
|
tools/perf/arch/x86/tests/regs_load.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
/* Byte offsets of each saved register within the u64 regs[] buffer */
#define AX 0
#define BX 1 * 8
#define CX 2 * 8
#define DX 3 * 8
#define SI 4 * 8
#define DI 5 * 8
#define BP 6 * 8
#define SP 7 * 8
#define IP 8 * 8
#define FLAGS 9 * 8
#define CS 10 * 8
#define SS 11 * 8
#define DS 12 * 8
#define ES 13 * 8
#define FS 14 * 8
#define GS 15 * 8
#define R8 16 * 8
#define R9 17 * 8
#define R10 18 * 8
#define R11 19 * 8
#define R12 20 * 8
#define R13 21 * 8
#define R14 22 * 8
#define R15 23 * 8
.text
#ifdef HAVE_ARCH_X86_64_SUPPORT
/*
 * void perf_regs_load(u64 *regs) — 64-bit variant.
 * Snapshots the GPRs into regs[] (pointer in %rdi, SysV AMD64 ABI).
 * SP and IP are adjusted so the snapshot reflects the caller's state,
 * excluding this call; flags and segment slots are stored as zero.
 */
SYM_FUNC_START(perf_regs_load)
movq %rax, AX(%rdi)
movq %rbx, BX(%rdi)
movq %rcx, CX(%rdi)
movq %rdx, DX(%rdi)
movq %rsi, SI(%rdi)
movq %rdi, DI(%rdi)
movq %rbp, BP(%rdi)
leaq 8(%rsp), %rax /* exclude this call. */
movq %rax, SP(%rdi)
movq 0(%rsp), %rax /* return address = caller's IP */
movq %rax, IP(%rdi)
movq $0, FLAGS(%rdi)
movq $0, CS(%rdi)
movq $0, SS(%rdi)
movq $0, DS(%rdi)
movq $0, ES(%rdi)
movq $0, FS(%rdi)
movq $0, GS(%rdi)
movq %r8, R8(%rdi)
movq %r9, R9(%rdi)
movq %r10, R10(%rdi)
movq %r11, R11(%rdi)
movq %r12, R12(%rdi)
movq %r13, R13(%rdi)
movq %r14, R14(%rdi)
movq %r15, R15(%rdi)
ret
SYM_FUNC_END(perf_regs_load)
#else
/*
 * 32-bit variant: the regs pointer is on the stack (cdecl). %edi is
 * used as the buffer pointer, so its original value is saved with a
 * push on entry and recovered via pop before being stored.
 */
SYM_FUNC_START(perf_regs_load)
push %edi
movl 8(%esp), %edi /* regs argument (4 = ret addr, +4 = saved edi) */
movl %eax, AX(%edi)
movl %ebx, BX(%edi)
movl %ecx, CX(%edi)
movl %edx, DX(%edi)
movl %esi, SI(%edi)
pop %eax /* recover the caller's original %edi value */
movl %eax, DI(%edi)
movl %ebp, BP(%edi)
leal 4(%esp), %eax /* exclude this call. */
movl %eax, SP(%edi)
movl 0(%esp), %eax /* return address = caller's IP */
movl %eax, IP(%edi)
movl $0, FLAGS(%edi)
movl $0, CS(%edi)
movl $0, SS(%edi)
movl $0, DS(%edi)
movl $0, ES(%edi)
movl $0, FS(%edi)
movl $0, GS(%edi)
ret
SYM_FUNC_END(perf_regs_load)
#endif
/*
 * We need to provide note.GNU-stack section, saying that we want
 * NOT executable stack. Otherwise the final linking will assume that
 * the ELF stack should not be restricted at all and set it RWX.
 */
.section .note.GNU-stack,"",@progbits
|
aixcc-public/challenge-001-exemplar-source
| 1,560
|
tools/perf/arch/powerpc/tests/regs_load.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
/* Offset is based on macros from arch/powerpc/include/uapi/asm/ptrace.h. */
#define R0 0
#define R1 1 * 8
#define R2 2 * 8
#define R3 3 * 8
#define R4 4 * 8
#define R5 5 * 8
#define R6 6 * 8
#define R7 7 * 8
#define R8 8 * 8
#define R9 9 * 8
#define R10 10 * 8
#define R11 11 * 8
#define R12 12 * 8
#define R13 13 * 8
#define R14 14 * 8
#define R15 15 * 8
#define R16 16 * 8
#define R17 17 * 8
#define R18 18 * 8
#define R19 19 * 8
#define R20 20 * 8
#define R21 21 * 8
#define R22 22 * 8
#define R23 23 * 8
#define R24 24 * 8
#define R25 25 * 8
#define R26 26 * 8
#define R27 27 * 8
#define R28 28 * 8
#define R29 29 * 8
#define R30 30 * 8
#define R31 31 * 8
#define NIP 32 * 8
#define CTR 35 * 8
#define LINK 36 * 8
#define XER 37 * 8
/*
 * void perf_regs_load(u64 *regs): store all GPRs plus NIP/CTR/LINK/XER
 * into the buffer pointed to by r3 (first argument per the ELF ABI).
 * r4 is used as scratch for the special-purpose registers and restored
 * from its already-saved copy before returning.
 */
.globl perf_regs_load
perf_regs_load:
std 0, R0(3)
std 1, R1(3)
std 2, R2(3)
std 3, R3(3)
std 4, R4(3)
std 5, R5(3)
std 6, R6(3)
std 7, R7(3)
std 8, R8(3)
std 9, R9(3)
std 10, R10(3)
std 11, R11(3)
std 12, R12(3)
std 13, R13(3)
std 14, R14(3)
std 15, R15(3)
std 16, R16(3)
std 17, R17(3)
std 18, R18(3)
std 19, R19(3)
std 20, R20(3)
std 21, R21(3)
std 22, R22(3)
std 23, R23(3)
std 24, R24(3)
std 25, R25(3)
std 26, R26(3)
std 27, R27(3)
std 28, R28(3)
std 29, R29(3)
std 30, R30(3)
std 31, R31(3)
/* store NIP */
mflr 4
std 4, NIP(3)
/* Store LR; same value as NIP — r4 still holds the LR read above,
   since the saved PC is the return address in the caller. */
std 4, LINK(3)
/* Store XER */
mfxer 4
std 4, XER(3)
/* Store CTR */
mfctr 4
std 4, CTR(3)
/* Restore original value of r4 */
ld 4, R4(3)
blr
|
aixcc-public/challenge-001-exemplar-source
| 2,246
|
tools/testing/selftests/sgx/test_encl_bootstrap.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright(c) 2016-20 Intel Corporation.
 */
# Raw encoding of the ENCLU instruction (leaf selected via %rax).
.macro ENCLU
.byte 0x0f, 0x01, 0xd7
.endm
# Two page-aligned Thread Control Structure (TCS) pages; both route
# entry through encl_entry but use separate SSA frames.
.section ".tcs", "aw"
.balign 4096
.fill 1, 8, 0 # STATE (set by CPU)
.fill 1, 8, 0 # FLAGS
.quad encl_ssa_tcs1 # OSSA
.fill 1, 4, 0 # CSSA (set by CPU)
.fill 1, 4, 1 # NSSA
.quad encl_entry # OENTRY
.fill 1, 8, 0 # AEP (set by EENTER and ERESUME)
.fill 1, 8, 0 # OFSBASE
.fill 1, 8, 0 # OGSBASE
.fill 1, 4, 0xFFFFFFFF # FSLIMIT
.fill 1, 4, 0xFFFFFFFF # GSLIMIT
.fill 4024, 1, 0 # Reserved
# TCS2
.fill 1, 8, 0 # STATE (set by CPU)
.fill 1, 8, 0 # FLAGS
.quad encl_ssa_tcs2 # OSSA
.fill 1, 4, 0 # CSSA (set by CPU)
.fill 1, 4, 1 # NSSA
.quad encl_entry # OENTRY
.fill 1, 8, 0 # AEP (set by EENTER and ERESUME)
.fill 1, 8, 0 # OFSBASE
.fill 1, 8, 0 # OGSBASE
.fill 1, 4, 0xFFFFFFFF # FSLIMIT
.fill 1, 4, 0xFFFFFFFF # GSLIMIT
.fill 4024, 1, 0 # Reserved
.text
encl_entry:
# RBX contains the base address for TCS, which is the first address
# inside the enclave for TCS #1 and one page into the enclave for
# TCS #2. By adding the value of encl_stack to it, we get
# the absolute address for the stack.
lea (encl_stack)(%rbx), %rax
jmp encl_entry_core
encl_dyn_entry:
# Entry point for dynamically created TCS page expected to follow
# its stack directly.
lea -1(%rbx), %rax
encl_entry_core:
# %rax holds the enclave stack pointer chosen above; swap it in and
# keep the untrusted stack pointer so it can be restored at exit.
xchg %rsp, %rax
push %rax
push %rcx # push the address after EENTER
push %rbx # push the enclave base address
call encl_body
pop %rbx # pop the enclave base address
/* Clear volatile GPRs, except RAX (EEXIT function). */
xor %rcx, %rcx
xor %rdx, %rdx
xor %rdi, %rdi
xor %rsi, %rsi
xor %r8, %r8
xor %r9, %r9
xor %r10, %r10
xor %r11, %r11
# Reset status flags.
add %rdx, %rdx # OF = SF = AF = CF = 0; ZF = PF = 1
# Prepare EEXIT target by popping the address of the instruction after
# EENTER to RBX.
pop %rbx
# Restore the caller stack.
pop %rax
mov %rax, %rsp
# EEXIT (ENCLU leaf 4)
mov $4, %rax
enclu
# SSA frames and per-TCS stacks referenced from the TCS pages above.
.section ".data", "aw"
encl_ssa_tcs1:
.space 4096
encl_ssa_tcs2:
.space 4096
.balign 4096
# Stack of TCS #1
.space 4096
encl_stack:
.balign 4096
# Stack of TCS #2
.space 4096
|
aixcc-public/challenge-001-exemplar-source
| 3,344
|
tools/testing/selftests/kvm/lib/aarch64/handlers.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Exception vector table and handler stubs for the KVM selftest
 * guest code.  Each handler saves the full GPR context (plus PC and
 * PSTATE) in an ex_regs-shaped frame on the stack, calls
 * route_exception(regs, vector), then restores and erets.
 */
.macro save_registers
add sp, sp, #-16 * 17
stp x0, x1, [sp, #16 * 0]
stp x2, x3, [sp, #16 * 1]
stp x4, x5, [sp, #16 * 2]
stp x6, x7, [sp, #16 * 3]
stp x8, x9, [sp, #16 * 4]
stp x10, x11, [sp, #16 * 5]
stp x12, x13, [sp, #16 * 6]
stp x14, x15, [sp, #16 * 7]
stp x16, x17, [sp, #16 * 8]
stp x18, x19, [sp, #16 * 9]
stp x20, x21, [sp, #16 * 10]
stp x22, x23, [sp, #16 * 11]
stp x24, x25, [sp, #16 * 12]
stp x26, x27, [sp, #16 * 13]
stp x28, x29, [sp, #16 * 14]
/*
 * This stores sp_el1 into ex_regs.sp so exception handlers can "look"
 * at it. It will _not_ be used to restore the sp on return from the
 * exception so handlers can not update it.
 */
add x1, sp, #16 * 17
stp x30, x1, [sp, #16 * 15] /* x30, SP */
mrs x1, elr_el1
mrs x2, spsr_el1
stp x1, x2, [sp, #16 * 16] /* PC, PSTATE */
.endm
.macro restore_registers
ldp x1, x2, [sp, #16 * 16] /* PC, PSTATE */
msr elr_el1, x1
msr spsr_el1, x2
/* sp is not restored */
ldp x30, xzr, [sp, #16 * 15] /* x30, SP */
ldp x28, x29, [sp, #16 * 14]
ldp x26, x27, [sp, #16 * 13]
ldp x24, x25, [sp, #16 * 12]
ldp x22, x23, [sp, #16 * 11]
ldp x20, x21, [sp, #16 * 10]
ldp x18, x19, [sp, #16 * 9]
ldp x16, x17, [sp, #16 * 8]
ldp x14, x15, [sp, #16 * 7]
ldp x12, x13, [sp, #16 * 6]
ldp x10, x11, [sp, #16 * 5]
ldp x8, x9, [sp, #16 * 4]
ldp x6, x7, [sp, #16 * 3]
ldp x4, x5, [sp, #16 * 2]
ldp x2, x3, [sp, #16 * 1]
ldp x0, x1, [sp, #16 * 0]
add sp, sp, #16 * 17
eret
.endm
/* Vector table base: must be 2 KB (0x800) aligned per the architecture. */
.pushsection ".entry.text", "ax"
.balign 0x800
.global vectors
vectors:
.popsection
.set vector, 0
/*
 * Build an exception handler for vector and append a jump to it into
 * vectors (while making sure that it's 0x80 aligned).
 */
.macro HANDLER, label
handler_\label:
save_registers
mov x0, sp
mov x1, #vector
bl route_exception
restore_registers
.pushsection ".entry.text", "ax"
.balign 0x80
b handler_\label
.popsection
.set vector, vector + 1
.endm
/* Vector slot that reports an unexpected exception and never returns. */
.macro HANDLER_INVALID
.pushsection ".entry.text", "ax"
.balign 0x80
/* This will abort so no need to save and restore registers. */
mov x0, #vector
mov x1, #0 /* ec */
mov x2, #0 /* valid_ec */
b kvm_exit_unexpected_exception
.popsection
.set vector, vector + 1
.endm
/*
 * Caution: be sure to not add anything between the declaration of vectors
 * above and these macro calls that will build the vectors table below it.
 */
HANDLER_INVALID // Synchronous EL1t
HANDLER_INVALID // IRQ EL1t
HANDLER_INVALID // FIQ EL1t
HANDLER_INVALID // Error EL1t
HANDLER el1h_sync // Synchronous EL1h
HANDLER el1h_irq // IRQ EL1h
HANDLER el1h_fiq // FIQ EL1h
HANDLER el1h_error // Error EL1h
HANDLER el0_sync_64 // Synchronous 64-bit EL0
HANDLER el0_irq_64 // IRQ 64-bit EL0
HANDLER el0_fiq_64 // FIQ 64-bit EL0
HANDLER el0_error_64 // Error 64-bit EL0
HANDLER el0_sync_32 // Synchronous 32-bit EL0
HANDLER el0_irq_32 // IRQ 32-bit EL0
HANDLER el0_fiq_32 // FIQ 32-bit EL0
HANDLER el0_error_32 // Error 32-bit EL0
|
aixcc-public/challenge-001-exemplar-source
| 1,284
|
tools/testing/selftests/kvm/lib/x86_64/handlers.S
|
/*
 * Common exception entry: the per-vector wrappers below have already
 * pushed the error code (or a dummy 0) and the vector number.  Save
 * all GPRs to build a regs frame, call route_exception(regs), restore,
 * drop the vector/error code, and iretq back to the interrupted code.
 */
handle_exception:
push %r15
push %r14
push %r13
push %r12
push %r11
push %r10
push %r9
push %r8
push %rdi
push %rsi
push %rbp
push %rbx
push %rdx
push %rcx
push %rax
mov %rsp, %rdi /* regs pointer = current stack top */
call route_exception
pop %rax
pop %rcx
pop %rdx
pop %rbx
pop %rbp
pop %rsi
pop %rdi
pop %r8
pop %r9
pop %r10
pop %r11
pop %r12
pop %r13
pop %r14
pop %r15
/* Discard vector and error code. */
add $16, %rsp
iretq
/*
 * Build the handle_exception wrappers which push the vector/error code on the
 * stack and an array of pointers to those wrappers.
 */
.pushsection .rodata
.globl idt_handlers
idt_handlers:
.popsection
.macro HANDLERS has_error from to
vector = \from
.rept \to - \from + 1
.align 8
/* Fetch current address and append it to idt_handlers. */
666 :
.pushsection .rodata
.quad 666b
.popsection
.if ! \has_error
pushq $0 /* fake error code so the frame layout is uniform */
.endif
pushq $vector
jmp handle_exception
vector = vector + 1
.endr
.endm
.global idt_handler_code
idt_handler_code:
/* Vectors that push a real error code get has_error=1 (8, 10-14, 17). */
HANDLERS has_error=0 from=0 to=7
HANDLERS has_error=1 from=8 to=8
HANDLERS has_error=0 from=9 to=9
HANDLERS has_error=1 from=10 to=14
HANDLERS has_error=0 from=15 to=16
HANDLERS has_error=1 from=17 to=17
HANDLERS has_error=0 from=18 to=255
.section .note.GNU-stack, "", %progbits
|
aixcc-public/challenge-001-exemplar-source
| 2,436
|
tools/testing/selftests/arm64/mte/mte_helper.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020 ARM Limited */
#include "mte_def.h"
/* MTE instructions (irg/gmi/ldg/stg/stzg) require armv8.5-a+memtag. */
.arch armv8.5-a+memtag
#define ENTRY(name) \
.globl name ;\
.p2align 2;\
.type name, @function ;\
name:
#define ENDPROC(name) \
.size name, .-name ;
.text
/*
 * mte_insert_random_tag: Insert random tag and might be same as the source tag if
 * the source pointer has it.
 * Input:
 * x0 - source pointer with a tag/no-tag
 * Return:
 * x0 - pointer with random tag
 */
ENTRY(mte_insert_random_tag)
irg x0, x0, xzr /* xzr = empty exclude mask: any tag may be chosen */
ret
ENDPROC(mte_insert_random_tag)
/*
 * mte_insert_new_tag: Insert new tag and different from the source tag if
 * source pointer has it.
 * Input:
 * x0 - source pointer with a tag/no-tag
 * Return:
 * x0 - pointer with random tag
 */
ENTRY(mte_insert_new_tag)
gmi x1, x0, xzr /* build an exclude mask containing x0's current tag */
irg x0, x0, x1
ret
ENDPROC(mte_insert_new_tag)
/*
 * mte_get_tag_address: Get the tag from given address.
 * Input:
 * x0 - source pointer
 * Return:
 * x0 - pointer with appended tag
 */
ENTRY(mte_get_tag_address)
ldg x0, [x0]
ret
ENDPROC(mte_get_tag_address)
/*
 * mte_set_tag_address_range: Set the tag range from the given address
 * Input:
 * x0 - source pointer with tag data
 * x1 - range
 * Return:
 * none
 */
ENTRY(mte_set_tag_address_range)
cbz x1, 2f /* zero-length range: nothing to tag */
1:
stg x0, [x0, #0x0]
add x0, x0, #MT_GRANULE_SIZE
sub x1, x1, #MT_GRANULE_SIZE
cbnz x1, 1b
2:
ret
ENDPROC(mte_set_tag_address_range)
/*
 * mte_clear_tag_address_range: Clear the tag range from the given address
 * Input:
 * x0 - source pointer with tag data
 * x1 - range
 * Return:
 * none
 */
ENTRY(mte_clear_tag_address_range)
cbz x1, 2f /* zero-length range: nothing to clear */
1:
stzg x0, [x0, #0x0] /* stzg also zeroes the granule's data */
add x0, x0, #MT_GRANULE_SIZE
sub x1, x1, #MT_GRANULE_SIZE
cbnz x1, 1b
2:
ret
ENDPROC(mte_clear_tag_address_range)
/*
 * mte_enable_pstate_tco: Enable PSTATE.TCO (tag check override) field
 * Input:
 * none
 * Return:
 * none
 */
ENTRY(mte_enable_pstate_tco)
msr tco, #MT_PSTATE_TCO_EN
ret
ENDPROC(mte_enable_pstate_tco)
/*
 * mte_disable_pstate_tco: Disable PSTATE.TCO (tag check override) field
 * Input:
 * none
 * Return:
 * none
 */
ENTRY(mte_disable_pstate_tco)
msr tco, #MT_PSTATE_TCO_DIS
ret
ENDPROC(mte_disable_pstate_tco)
/*
 * mte_get_pstate_tco: Get PSTATE.TCO (tag check override) field
 * Input:
 * none
 * Return:
 * x0
 */
ENTRY(mte_get_pstate_tco)
mrs x0, tco
ubfx x0, x0, #MT_PSTATE_TCO_SHIFT, #1 /* extract the single TCO bit */
ret
ENDPROC(mte_get_pstate_tco)
|
aixcc-public/challenge-001-exemplar-source
| 7,214
|
tools/testing/selftests/arm64/fp/za-test.S
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2021 ARM Limited.
// Original author: Mark Brown <broonie@kernel.org>
//
// Scalable Matrix Extension ZA context switch test
// Repeatedly writes unique test patterns into each ZA tile
// and reads them back to verify integrity.
//
// for x in `seq 1 NR_CPUS`; do sve-test & pids=$pids\ $! ; done
// (leave it running for as long as you want...)
// kill $pids
#include <asm/unistd.h>
#include "assembler.h"
#include "asm-offsets.h"
#include "sme-inst.h"
.arch_extension sve
#define MAXVL 2048
#define MAXVL_B (MAXVL / 8)
// Declare some storage space to shadow ZA register contents and a
// scratch buffer for a vector.
.pushsection .text
.data
.align 4
zaref:
.space MAXVL_B * MAXVL_B
scratch:
.space MAXVL_B
.popsection
// Trivial memory copy: copy x2 bytes, starting at address x1, to address x0.
// Clobbers x0-x3
function memcpy
cmp x2, #0
b.eq 1f
0: ldrb w3, [x1], #1
strb w3, [x0], #1
subs x2, x2, #1
b.ne 0b
1: ret
endfunction
// Generate a test pattern for storage in ZA
// x0: pid
// x1: row in ZA
// x2: generation
// These values are used to constuct a 32-bit pattern that is repeated in the
// scratch buffer as many times as will fit:
// bits 31:28 generation number (increments once per test_loop)
// bits 27:16 pid
// bits 15: 8 row number
// bits 7: 0 32-bit lane index
function pattern
mov w3, wzr
bfi w3, w0, #16, #12 // PID
bfi w3, w1, #8, #8 // Row
bfi w3, w2, #28, #4 // Generation
ldr x0, =scratch
mov w1, #MAXVL_B / 4
0: str w3, [x0], #4
add w3, w3, #1 // Lane
subs w1, w1, #1
b.ne 0b
ret
endfunction
// Get the address of shadow data for ZA horizontal vector xn
.macro _adrza xd, xn, nrtmp
ldr \xd, =zaref
rdsvl \nrtmp, 1
madd \xd, x\nrtmp, \xn, \xd
.endm
// Set up test pattern in a ZA horizontal vector
// x0: pid
// x1: row number
// x2: generation
function setup_za
mov x4, x30
mov x12, x1 // Use x12 for vector select
bl pattern // Get pattern in scratch buffer
_adrza x0, x12, 2 // Shadow buffer pointer to x0 and x5
mov x5, x0
ldr x1, =scratch
bl memcpy // length set up in x2 by _adrza
_ldr_za 12, 5 // load vector w12 from pointer x5
ret x4
endfunction
// Trivial memory compare: compare x2 bytes starting at address x0 with
// bytes starting at address x1.
// Returns only if all bytes match; otherwise, the program is aborted.
// Clobbers x0-x5.
function memcmp
cbz x2, 2f
stp x0, x1, [sp, #-0x20]!
str x2, [sp, #0x10]
mov x5, #0
0: ldrb w3, [x0, x5]
ldrb w4, [x1, x5]
add x5, x5, #1
cmp w3, w4
b.ne 1f
subs x2, x2, #1
b.ne 0b
1: ldr x2, [sp, #0x10]
ldp x0, x1, [sp], #0x20
b.ne barf
2: ret
endfunction
// Verify that a ZA vector matches its shadow in memory, else abort
// x0: row number
// Clobbers x0-x7 and x12.
function check_za
mov x3, x30 // preserve return address
mov x12, x0 // ZA vector select for _str_za
_adrza x5, x0, 6 // pointer to expected value in x5; x6 = SVL bytes
mov x4, x0 // NOTE(review): x4 appears unused after this — confirm
ldr x7, =scratch // x7 is scratch
mov x0, x7 // Poison scratch
mov x1, x6
bl memfill_ae
_str_za 12, 7 // save vector w12 to pointer x7
mov x0, x5 // expected
mov x1, x7 // actual
mov x2, x6 // length = SVL bytes
mov x30, x3
b memcmp // tail call; memcmp aborts on mismatch
endfunction
// Any SME register modified here can cause corruption in the main
// thread -- but *only* the locations modified here.
// Signal handler for SIGUSR1 (see _start); ucontext pointer arrives in x2.
function irritator_handler
// Increment the irritation signal count (x23):
ldr x0, [x2, #ucontext_regs + 8 * 23]
add x0, x0, #1
str x0, [x2, #ucontext_regs + 8 * 23]
// Corrupt some random ZA data
// (disabled: kept for reference, mirrors the SVE/FPSIMD variants)
#if 0
adr x0, .text + (irritator_handler - .text) / 16 * 16
movi v0.8b, #1
movi v9.16b, #2
movi v31.8b, #3
#endif
ret
endfunction
// SIGUSR2 handler: bump the signal counter (x23 in the interrupted
// context) without touching any SME state.
function tickle_handler
// Increment the signal count (x23):
ldr x0, [x2, #ucontext_regs + 8 * 23]
add x0, x0, #1
str x0, [x2, #ucontext_regs + 8 * 23]
ret
endfunction
// SIGINT/SIGTERM handler: report iteration and signal counts from the
// interrupted context, then exit(0). Does not return.
function terminate_handler
mov w21, w0 // signal number
mov x20, x2 // ucontext pointer
puts "Terminated by signal "
mov w0, w21
bl putdec
puts ", no error, iterations="
ldr x0, [x20, #ucontext_regs + 8 * 22] // x22 = generation count
bl putdec
puts ", signals="
ldr x0, [x20, #ucontext_regs + 8 * 23] // x23 = signal count
bl putdecn
mov x0, #0
mov x8, #__NR_exit
svc #0
endfunction
// w0: signal number
// x1: sa_action
// w2: sa_flags
// Clobbers x0-x6,x8
// Installs a signal handler via rt_sigaction(2); aborts on failure.
function setsignal
// Reserve a 16-byte-aligned frame: LR plus a zeroed struct sigaction.
str x30, [sp, #-((sa_sz + 15) / 16 * 16 + 16)]!
mov w4, w0 // stash args across memclr
mov x5, x1
mov w6, w2
add x0, sp, #16
mov x1, #sa_sz
bl memclr // zero the sigaction struct
mov w0, w4
add x1, sp, #16
str w6, [x1, #sa_flags]
str x5, [x1, #sa_handler]
mov x2, #0 // oldact = NULL
mov x3, #sa_mask_sz
mov x8, #__NR_rt_sigaction
svc #0
cbz w0, 1f
puts "sigaction failure\n"
b .Labort
1: ldr x30, [sp], #((sa_sz + 15) / 16 * 16 + 16)
ret
endfunction
// Main program entry point
// Repeatedly fills every ZA row with a unique pattern, encourages
// preemption via sched_yield, then verifies ZA survived the context
// switches. Runs forever; terminated externally via SIGINT/SIGTERM.
// Register roles: x19 = SVL bits, x20 = pid, x21 = row counter,
// x22 = generation, x23 = signal count, x24 = row count.
.globl _start
function _start
_start:
mov x23, #0 // signal count
mov w0, #SIGINT
adr x1, terminate_handler
mov w2, #SA_SIGINFO
bl setsignal
mov w0, #SIGTERM
adr x1, terminate_handler
mov w2, #SA_SIGINFO
bl setsignal
mov w0, #SIGUSR1
adr x1, irritator_handler
mov w2, #SA_SIGINFO
orr w2, w2, #SA_NODEFER
bl setsignal
mov w0, #SIGUSR2
adr x1, tickle_handler
mov w2, #SA_SIGINFO
orr w2, w2, #SA_NODEFER
bl setsignal
puts "Streaming mode "
smstart_za
// Sanity-check and report the vector length
rdsvl 19, 8 // x19 = streaming VL in bits
cmp x19, #128
b.lo 1f
cmp x19, #2048
b.hi 1f
tst x19, #(8 - 1) // must be a multiple of 8 bits
b.eq 2f
1: puts "bad vector length: "
mov x0, x19
bl putdecn
b .Labort
2: puts "vector length:\t"
mov x0, x19
bl putdec
puts " bits\n"
// Obtain our PID, to ensure test pattern uniqueness between processes
mov x8, #__NR_getpid
svc #0
mov x20, x0
puts "PID:\t"
mov x0, x20
bl putdecn
mov x22, #0 // generation number, increments per iteration
.Ltest_loop:
rdsvl 0, 8 // the VL must not have changed under us
cmp x0, x19
b.ne vl_barf
rdsvl 21, 1 // Set up ZA & shadow with test pattern
0: mov x0, x20
sub x1, x21, #1 // rows filled from SVL_B-1 down to 0
mov x2, x22
bl setup_za
subs x21, x21, #1
b.ne 0b
mov x8, #__NR_sched_yield // encourage preemption
1:
svc #0
mrs x0, S3_3_C4_C2_2 // SVCR should have ZA=1,SM=0
and x1, x0, #3
cmp x1, #2
b.ne svcr_barf
rdsvl 21, 1 // Verify that the data made it through
rdsvl 24, 1 // Verify that the data made it through
0: sub x0, x24, x21 // rows checked from 0 up to SVL_B-1
bl check_za
subs x21, x21, #1
b.ne 0b
add x22, x22, #1 // Everything still working
b .Ltest_loop
.Labort:
mov x0, #0
mov x1, #SIGABRT
mov x8, #__NR_kill
svc #0
endfunction
// Report a ZA data mismatch and abort via SIGABRT. Reached from memcmp.
// x0: expected data, x1: actual data, x2: length in bytes.
// Does not return.
function barf
// fpsimd.c acitivty log dump hack
// ldr w0, =0xdeadc0de
// mov w8, #__NR_exit
// svc #0
// end hack
smstop // leave streaming/ZA mode before doing plain FP/printing
mov x10, x0 // expected data
mov x11, x1 // actual data
mov x12, x2 // data size
puts "Mismatch: PID="
mov x0, x20
bl putdec
puts ", iteration="
mov x0, x22
bl putdec
puts ", row="
mov x0, x21
bl putdecn
puts "\tExpected ["
mov x0, x10
mov x1, x12
bl dumphex
puts "]\n\tGot ["
mov x0, x11
mov x1, x12
bl dumphex
puts "]\n"
mov x8, #__NR_getpid // x0 = own pid for the kill() below
svc #0
// fpsimd.c acitivty log dump hack
// ldr w0, =0xdeadc0de
// mov w8, #__NR_exit
// svc #0
// ^ end of hack
mov x1, #SIGABRT
mov x8, #__NR_kill
svc #0
// mov x8, #__NR_exit
// mov x1, #1
// svc #0
endfunction
// Report an unexpected change in the active vector length, then exit(1).
// x0: observed VL in bits. Does not return.
function vl_barf
mov x10, x0
puts "Bad active VL: "
mov x0, x10
bl putdecn
mov x8, #__NR_exit
mov x1, #1
svc #0
endfunction
// Report an unexpected SVCR value (expected ZA=1, SM=0), then exit(1).
// x0: observed SVCR. Does not return.
function svcr_barf
mov x10, x0
puts "Bad SVCR: "
mov x0, x10
bl putdecn
mov x8, #__NR_exit
mov x1, #1
svc #0
endfunction
|
aixcc-public/challenge-001-exemplar-source
| 9,537
|
tools/testing/selftests/arm64/fp/sve-test.S
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2015-2019 ARM Limited.
// Original author: Dave Martin <Dave.Martin@arm.com>
//
// Simple Scalable Vector Extension context switch test
// Repeatedly writes unique test patterns into each SVE register
// and reads them back to verify integrity.
//
// for x in `seq 1 NR_CPUS`; do sve-test & pids=$pids\ $! ; done
// (leave it running for as long as you want...)
// kill $pids
#include <asm/unistd.h>
#include "assembler.h"
#include "asm-offsets.h"
#include "sme-inst.h"
#define NZR 32 // number of SVE Z registers
#define NPR 16 // number of SVE predicate registers
#define MAXVL_B (2048 / 8) // maximum vector length in bytes
.arch_extension sve
// Thin wrappers so define_accessor can splice register/base numbers in.
.macro _sve_ldr_v zt, xn
ldr z\zt, [x\xn]
.endm
.macro _sve_str_v zt, xn
str z\zt, [x\xn]
.endm
.macro _sve_ldr_p pt, xn
ldr p\pt, [x\xn]
.endm
.macro _sve_str_p pt, xn
str p\pt, [x\xn]
.endm
// Generate accessor functions to read/write programmatically selected
// SVE registers.
// x0 is the register index to access
// x1 is the memory address to read from (getz,setp) or store to (setz,setp)
// All clobber x0-x2
define_accessor setz, NZR, _sve_ldr_v // setz: Z[x0] <- [x1]
define_accessor getz, NZR, _sve_str_v // getz: [x1] <- Z[x0]
define_accessor setp, NPR, _sve_ldr_p // setp: P[x0] <- [x1]
define_accessor getp, NPR, _sve_str_p // getp: [x1] <- P[x0]
// Declare some storate space to shadow the SVE register contents:
.pushsection .text
.data
.align 4
// Expected contents of every Z register, one max-VL slot per register.
zref:
.space MAXVL_B * NZR
// Expected contents of every P register (predicates are VL/8 bytes).
pref:
.space MAXVL_B / 8 * NPR
// Expected FFR contents.
ffrref:
.space MAXVL_B / 8
// One-vector work buffer for pattern generation and checking.
scratch:
.space MAXVL_B
.popsection
// Generate a test pattern for storage in SVE registers
// x0: pid (16 bits)
// x1: register number (6 bits)
// x2: generation (4 bits)
// These values are used to constuct a 32-bit pattern that is repeated in the
// scratch buffer as many times as will fit:
// bits 31:28 generation number (increments once per test_loop)
// bits 27:22 32-bit lane index
// bits 21:16 register number
// bits 15: 0 pid
// Clobbers x0-x2 (and fills the scratch buffer).
function pattern
orr w1, w0, w1, lsl #16 // pid | reg<<16
orr w2, w1, w2, lsl #28 // | generation<<28
ldr x0, =scratch
mov w1, #MAXVL_B / 4 // number of 32-bit lanes
0: str w2, [x0], #4
add w2, w2, #(1 << 22) // bump the lane-index field
subs w1, w1, #1
bne 0b
ret
endfunction
// Get the address of shadow data for SVE Z-register Z<xn>
// \nrtmp receives the VL in bytes as a side effect (callers rely on it).
.macro _adrz xd, xn, nrtmp
ldr \xd, =zref
rdvl x\nrtmp, #1 // VL in bytes == shadow slot size
madd \xd, x\nrtmp, \xn, \xd
.endm
// Get the address of shadow data for SVE P-register P<xn - NZR>
// \nrtmp receives VL/8 (predicate size in bytes); \xn is rebased by -NZR.
.macro _adrp xd, xn, nrtmp
ldr \xd, =pref
rdvl x\nrtmp, #1
lsr x\nrtmp, x\nrtmp, #3 // predicate is VL/8 bytes
sub \xn, \xn, #NZR // P-reg numbers follow the Z-reg numbers
madd \xd, x\nrtmp, \xn, \xd
.endm
// Set up test pattern in a SVE Z-register
// x0: pid
// x1: register number
// x2: generation
// Writes the pattern into the shadow buffer and loads it into Z<x1>.
function setup_zreg
mov x4, x30 // preserve return address across bl calls
mov x6, x1
bl pattern // fill scratch with the pattern
_adrz x0, x6, 2 // x0 = shadow slot, x2 = VL bytes
mov x5, x0
ldr x1, =scratch
bl memcpy // shadow <- scratch (length in x2)
mov x0, x6
mov x1, x5
bl setz // Z[x6] <- shadow
ret x4
endfunction
// Set up test pattern in a SVE P-register
// x0: pid
// x1: register number
// x2: generation
// Writes the pattern into the shadow buffer and loads it into P<x1 - NZR>.
function setup_preg
mov x4, x30 // preserve return address across bl calls
mov x6, x1
bl pattern // fill scratch with the pattern
_adrp x0, x6, 2 // x0 = shadow slot, x2 = VL/8 bytes, x6 rebased
mov x5, x0
ldr x1, =scratch
bl memcpy // shadow <- scratch (length in x2)
mov x0, x6
mov x1, x5
bl setp // P[x6] <- shadow
ret x4
endfunction
// Set up test pattern in the FFR
// x0: pid
// x2: generation
//
// We need to generate a canonical FFR value, which consists of a number of
// low "1" bits, followed by a number of zeros. This gives us 17 unique values
// per 16 bits of FFR, so we create a 4 bit signature out of the PID and
// generation, and use that as the initial number of ones in the pattern.
// We fill the upper lanes of FFR with zeros.
// Beware: corrupts P0.
function setup_ffr
#ifndef SSVE
mov x4, x30 // preserve return address across bl calls
and w0, w0, #0x3 // 4-bit signature: pid[1:0] | generation[1:0]<<2
bfi w0, w2, #2, #2
mov w1, #1
lsl w1, w1, w0
sub w1, w1, #1 // (1 << sig) - 1: sig low ones
ldr x0, =ffrref
strh w1, [x0], 2 // first 16 bits of the shadow
rdvl x1, #1
lsr x1, x1, #3
sub x1, x1, #2 // remaining predicate bytes
bl memclr // zero the upper lanes of the shadow
mov x0, #0
ldr x1, =ffrref
bl setp // P0 <- shadow, then copy into FFR
wrffr p0.b
ret x4
#else
// FFR is not architecturally present in streaming mode: no-op.
ret
#endif
endfunction
// Trivial memory compare: compare x2 bytes starting at address x0 with
// bytes starting at address x1.
// Returns only if all bytes match; otherwise, the program is aborted.
// Clobbers x0-x5.
function memcmp
cbz x2, 2f // empty compare trivially matches
stp x0, x1, [sp, #-0x20]! // save args so barf can report them
str x2, [sp, #0x10]
mov x5, #0 // x5 = byte index
0: ldrb w3, [x0, x5]
ldrb w4, [x1, x5]
add x5, x5, #1
cmp w3, w4
b.ne 1f
subs x2, x2, #1
b.ne 0b
1: ldr x2, [sp, #0x10] // restore originals for barf's diagnostics
ldp x0, x1, [sp], #0x20
b.ne barf // flags still hold the byte-compare result
2: ret
endfunction
// Verify that a SVE Z-register matches its shadow in memory, else abort
// x0: reg number
// Clobbers x0-x7.
function check_zreg
mov x3, x30 // preserve return address
_adrz x5, x0, 6 // x5 = expected value, x6 = VL bytes
mov x4, x0
ldr x7, =scratch
mov x0, x7 // poison scratch before dumping into it
mov x1, x6
bl memfill_ae
mov x0, x4
mov x1, x7
bl getz // scratch <- Z[x4]
mov x0, x5 // expected
mov x1, x7 // actual
mov x2, x6 // length
mov x30, x3
b memcmp // tail call; memcmp aborts on mismatch
endfunction
// Verify that a SVE P-register matches its shadow in memory, else abort
// x0: reg number
// Clobbers x0-x7.
function check_preg
mov x3, x30 // preserve return address
_adrp x5, x0, 6 // x5 = expected value, x6 = VL/8 bytes, x0 rebased
mov x4, x0
ldr x7, =scratch
mov x0, x7 // poison scratch before dumping into it
mov x1, x6
bl memfill_ae
mov x0, x4
mov x1, x7
bl getp // scratch <- P[x4]
mov x0, x5 // expected
mov x1, x7 // actual
mov x2, x6 // length
mov x30, x3
b memcmp // tail call; memcmp aborts on mismatch
endfunction
// Verify that the FFR matches its shadow in memory, else abort
// Beware -- corrupts P0.
// Clobbers x0-x5.
function check_ffr
#ifndef SSVE
mov x3, x30 // preserve return address
ldr x4, =scratch
rdvl x5, #1
lsr x5, x5, #3 // predicate size = VL/8 bytes
mov x0, x4
mov x1, x5
bl memfill_ae // poison scratch before dumping into it
rdffr p0.b // FFR can only be read via a predicate
mov x0, #0
mov x1, x4
bl getp // scratch <- P0 (== FFR)
ldr x0, =ffrref // expected
mov x1, x4 // actual
mov x2, x5 // length
mov x30, x3
b memcmp // tail call; memcmp aborts on mismatch
#else
// FFR is not architecturally present in streaming mode: no-op.
ret
#endif
endfunction
// Any SVE register modified here can cause corruption in the main
// thread -- but *only* the registers modified here.
// SIGUSR1 handler; ucontext pointer arrives in x2.
function irritator_handler
// Increment the irritation signal count (x23):
ldr x0, [x2, #ucontext_regs + 8 * 23]
add x0, x0, #1
str x0, [x2, #ucontext_regs + 8 * 23]
// Corrupt some random Z-regs
adr x0, .text + (irritator_handler - .text) / 16 * 16
movi v0.8b, #1
movi v9.16b, #2
movi v31.8b, #3
#ifndef SSVE
// And P0
rdffr p0.b
// And FFR
wrffr p15.b
#endif
ret
endfunction
// SIGUSR2 handler: bump the signal counter (x23 in the interrupted
// context) without touching any SVE state.
function tickle_handler
// Increment the signal count (x23):
ldr x0, [x2, #ucontext_regs + 8 * 23]
add x0, x0, #1
str x0, [x2, #ucontext_regs + 8 * 23]
ret
endfunction
// SIGINT/SIGTERM handler: report iteration and signal counts from the
// interrupted context, then exit(0). Does not return.
function terminate_handler
mov w21, w0 // signal number
mov x20, x2 // ucontext pointer
puts "Terminated by signal "
mov w0, w21
bl putdec
puts ", no error, iterations="
ldr x0, [x20, #ucontext_regs + 8 * 22] // x22 = generation count
bl putdec
puts ", signals="
ldr x0, [x20, #ucontext_regs + 8 * 23] // x23 = signal count
bl putdecn
mov x0, #0
mov x8, #__NR_exit
svc #0
endfunction
// w0: signal number
// x1: sa_action
// w2: sa_flags
// Clobbers x0-x6,x8
// Installs a signal handler via rt_sigaction(2); aborts on failure.
function setsignal
// Reserve a 16-byte-aligned frame: LR plus a zeroed struct sigaction.
str x30, [sp, #-((sa_sz + 15) / 16 * 16 + 16)]!
mov w4, w0 // stash args across memclr
mov x5, x1
mov w6, w2
add x0, sp, #16
mov x1, #sa_sz
bl memclr // zero the sigaction struct
mov w0, w4
add x1, sp, #16
str w6, [x1, #sa_flags]
str x5, [x1, #sa_handler]
mov x2, #0 // oldact = NULL
mov x3, #sa_mask_sz
mov x8, #__NR_rt_sigaction
svc #0
cbz w0, 1f
puts "sigaction failure\n"
b .Labort
1: ldr x30, [sp], #((sa_sz + 15) / 16 * 16 + 16)
ret
endfunction
// Main program entry point
// Repeatedly fills every Z/P register and the FFR with unique patterns,
// then verifies they survived scheduling. Runs forever; terminated
// externally via SIGINT/SIGTERM.
// Register roles: x19 = VL bits, x20 = pid, x21 = register counter,
// x22 = generation, x23 = signal count.
.globl _start
function _start
_start:
mov x23, #0 // Irritation signal count
mov w0, #SIGINT
adr x1, terminate_handler
mov w2, #SA_SIGINFO
bl setsignal
mov w0, #SIGTERM
adr x1, terminate_handler
mov w2, #SA_SIGINFO
bl setsignal
mov w0, #SIGUSR1
adr x1, irritator_handler
mov w2, #SA_SIGINFO
orr w2, w2, #SA_NODEFER
bl setsignal
mov w0, #SIGUSR2
adr x1, tickle_handler
mov w2, #SA_SIGINFO
orr w2, w2, #SA_NODEFER
bl setsignal
#ifdef SSVE
puts "Streaming mode "
smstart_sm
#endif
// Sanity-check and report the vector length
rdvl x19, #8 // x19 = VL in bits
cmp x19, #128
b.lo 1f
cmp x19, #2048
b.hi 1f
tst x19, #(8 - 1) // must be a multiple of 8 bits
b.eq 2f
1: puts "Bad vector length: "
mov x0, x19
bl putdecn
b .Labort
2: puts "Vector length:\t"
mov x0, x19
bl putdec
puts " bits\n"
// Obtain our PID, to ensure test pattern uniqueness between processes
mov x8, #__NR_getpid
svc #0
mov x20, x0
puts "PID:\t"
mov x0, x20
bl putdecn
#ifdef SSVE
smstart_sm // syscalls will have exited streaming mode
#endif
mov x22, #0 // generation number, increments per iteration
.Ltest_loop:
rdvl x0, #8 // the VL must not have changed under us
cmp x0, x19
b.ne vl_barf
mov x21, #0 // Set up Z-regs & shadow with test pattern
0: mov x0, x20
mov x1, x21
and x2, x22, #0xf // generation is a 4-bit field
bl setup_zreg
add x21, x21, #1
cmp x21, #NZR
b.lo 0b
mov x0, x20 // Set up FFR & shadow with test pattern
mov x1, #NZR + NPR
and x2, x22, #0xf
bl setup_ffr
0: mov x0, x20 // Set up P-regs & shadow with test pattern
mov x1, x21
and x2, x22, #0xf
bl setup_preg
add x21, x21, #1
cmp x21, #NZR + NPR
b.lo 0b
// Can't do this when SVE state is volatile across SVC:
// mov x8, #__NR_sched_yield // Encourage preemption
// svc #0
mov x21, #0 // Verify everything made it through
0: mov x0, x21
bl check_zreg
add x21, x21, #1
cmp x21, #NZR
b.lo 0b
0: mov x0, x21
bl check_preg
add x21, x21, #1
cmp x21, #NZR + NPR
b.lo 0b
bl check_ffr
add x22, x22, #1 // Everything still working
b .Ltest_loop
.Labort:
mov x0, #0
mov x1, #SIGABRT
mov x8, #__NR_kill
svc #0
endfunction
// Report a register data mismatch and abort via SIGABRT. Reached from
// memcmp. x0: expected data, x1: actual data, x2: length in bytes.
// Does not return.
function barf
// fpsimd.c acitivty log dump hack
// ldr w0, =0xdeadc0de
// mov w8, #__NR_exit
// svc #0
// end hack
mov x10, x0 // expected data
mov x11, x1 // actual data
mov x12, x2 // data size
puts "Mismatch: PID="
mov x0, x20
bl putdec
puts ", iteration="
mov x0, x22
bl putdec
puts ", reg="
mov x0, x21
bl putdecn
puts "\tExpected ["
mov x0, x10
mov x1, x12
bl dumphex
puts "]\n\tGot ["
mov x0, x11
mov x1, x12
bl dumphex
puts "]\n"
mov x8, #__NR_getpid // x0 = own pid for the kill() below
svc #0
// fpsimd.c acitivty log dump hack
// ldr w0, =0xdeadc0de
// mov w8, #__NR_exit
// svc #0
// ^ end of hack
mov x1, #SIGABRT
mov x8, #__NR_kill
svc #0
// mov x8, #__NR_exit
// mov x1, #1
// svc #0
endfunction
// Report an unexpected change in the active vector length, then exit(1).
// x0: observed VL in bits. Does not return.
function vl_barf
mov x10, x0
puts "Bad active VL: "
mov x0, x10
bl putdecn
mov x8, #__NR_exit
mov x1, #1
svc #0
endfunction
|
aixcc-public/challenge-001-exemplar-source
| 5,892
|
tools/testing/selftests/arm64/fp/fpsimd-test.S
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2015-2019 ARM Limited.
// Original author: Dave Martin <Dave.Martin@arm.com>
//
// Simple FPSIMD context switch test
// Repeatedly writes unique test patterns into each FPSIMD register
// and reads them back to verify integrity.
//
// for x in `seq 1 NR_CPUS`; do fpsimd-test & pids=$pids\ $! ; done
// (leave it running for as long as you want...)
// kill $pids
#include <asm/unistd.h>
#include "assembler.h"
#include "asm-offsets.h"
#define NVR 32 // number of FPSIMD V registers
#define MAXVL_B (128 / 8) // V registers are fixed at 128 bits
// Thin wrappers so define_accessor can splice register/base numbers in.
.macro _vldr Vn:req, Xt:req
ld1 {v\Vn\().2d}, [x\Xt]
.endm
.macro _vstr Vn:req, Xt:req
st1 {v\Vn\().2d}, [x\Xt]
.endm
// Generate accessor functions to read/write programmatically selected
// FPSIMD registers.
// x0 is the register index to access
// x1 is the memory address to read from (getv,setp) or store to (setv,setp)
// All clobber x0-x2
define_accessor setv, NVR, _vldr // setv: V[x0] <- [x1]
define_accessor getv, NVR, _vstr // getv: [x1] <- V[x0]
// Declare some storate space to shadow the FPSIMD register contents:
.pushsection .text
.data
.align 4
// Expected contents of every V register, 16 bytes per register.
vref:
.space MAXVL_B * NVR
// One-vector work buffer for pattern generation and checking.
scratch:
.space MAXVL_B
.popsection
// Generate a test pattern for storage in FPSIMD registers
// x0: pid (16 bits)
// x1: register number (6 bits)
// x2: generation (4 bits)
// Clobbers x0-x2 (and fills the scratch buffer).
function pattern
orr w1, w0, w1, lsl #16 // pid | reg<<16
orr w2, w1, w2, lsl #28 // | generation<<28
ldr x0, =scratch
mov w1, #MAXVL_B / 4 // number of 32-bit lanes
0: str w2, [x0], #4
add w2, w2, #(1 << 22) // bump the lane-index field
subs w1, w1, #1
bne 0b
ret
endfunction
// Get the address of shadow data for FPSIMD V-register V<xn>
// \nrtmp receives the fixed 16-byte slot size as a side effect
// (callers rely on it as the data length).
.macro _adrv xd, xn, nrtmp
ldr \xd, =vref
mov x\nrtmp, #16 // V registers are always 16 bytes
madd \xd, x\nrtmp, \xn, \xd
.endm
// Set up test pattern in a FPSIMD V-register
// x0: pid
// x1: register number
// x2: generation
// Writes the pattern into the shadow buffer and loads it into V<x1>.
function setup_vreg
mov x4, x30 // preserve return address across bl calls
mov x6, x1
bl pattern // fill scratch with the pattern
_adrv x0, x6, 2 // x0 = shadow slot, x2 = 16
mov x5, x0
ldr x1, =scratch
bl memcpy // shadow <- scratch (length in x2)
mov x0, x6
mov x1, x5
bl setv // V[x6] <- shadow
ret x4
endfunction
// Trivial memory compare: compare x2 bytes starting at address x0 with
// bytes starting at address x1.
// Returns only if all bytes match; otherwise, the program is aborted.
// Clobbers x0-x5.
function memcmp
cbz x2, 1f // empty compare trivially matches
mov x5, #0 // x5 = byte index
0: ldrb w3, [x0, x5]
ldrb w4, [x1, x5]
add x5, x5, #1
cmp w3, w4
b.ne barf // mismatch: abort with diagnostics
subs x2, x2, #1
b.ne 0b
1: ret
endfunction
// Verify that a FPSIMD V-register matches its shadow in memory, else abort
// x0: reg number
// Clobbers x0-x7 (x6/x7 are set by _adrv and the scratch pointer, so the
// original "x0-x5" understated the clobber set).
function check_vreg
mov x3, x30 // preserve return address
_adrv x5, x0, 6 // x5 = expected value, x6 = 16 (length)
mov x4, x0
ldr x7, =scratch
mov x0, x7 // poison scratch before dumping into it
mov x1, x6
bl memfill_ae
mov x0, x4
mov x1, x7
bl getv // scratch <- V[x4]
mov x0, x5 // expected
mov x1, x7 // actual
mov x2, x6 // length
mov x30, x3
b memcmp // tail call; memcmp aborts on mismatch
endfunction
// Any FPSIMD register modified here can cause corruption in the main
// thread -- but *only* the registers modified here. (Original comment
// said "SVE"; this file tests FPSIMD.)
// SIGUSR1 handler; ucontext pointer arrives in x2.
function irritator_handler
// Increment the irritation signal count (x23):
ldr x0, [x2, #ucontext_regs + 8 * 23]
add x0, x0, #1
str x0, [x2, #ucontext_regs + 8 * 23]
// Corrupt some random V-regs
adr x0, .text + (irritator_handler - .text) / 16 * 16
movi v0.8b, #7
movi v9.16b, #9
movi v31.8b, #31
ret
endfunction
// SIGUSR2 handler: bump the signal counter (x23 in the interrupted
// context) without touching any FPSIMD state.
function tickle_handler
// Increment the signal count (x23):
ldr x0, [x2, #ucontext_regs + 8 * 23]
add x0, x0, #1
str x0, [x2, #ucontext_regs + 8 * 23]
ret
endfunction
// SIGINT/SIGTERM handler: report iteration and signal counts from the
// interrupted context, then exit(0). Does not return.
function terminate_handler
mov w21, w0 // signal number
mov x20, x2 // ucontext pointer
puts "Terminated by signal "
mov w0, w21
bl putdec
puts ", no error, iterations="
ldr x0, [x20, #ucontext_regs + 8 * 22] // x22 = generation count
bl putdec
puts ", signals="
ldr x0, [x20, #ucontext_regs + 8 * 23] // x23 = signal count
bl putdecn
mov x0, #0
mov x8, #__NR_exit
svc #0
endfunction
// w0: signal number
// x1: sa_action
// w2: sa_flags
// Clobbers x0-x6,x8
// Installs a signal handler via rt_sigaction(2); aborts on failure.
function setsignal
// Reserve a 16-byte-aligned frame: LR plus a zeroed struct sigaction.
str x30, [sp, #-((sa_sz + 15) / 16 * 16 + 16)]!
mov w4, w0 // stash args across memclr
mov x5, x1
mov w6, w2
add x0, sp, #16
mov x1, #sa_sz
bl memclr // zero the sigaction struct
mov w0, w4
add x1, sp, #16
str w6, [x1, #sa_flags]
str x5, [x1, #sa_handler]
mov x2, #0 // oldact = NULL
mov x3, #sa_mask_sz
mov x8, #__NR_rt_sigaction
svc #0
cbz w0, 1f
puts "sigaction failure\n"
b .Labort
1: ldr x30, [sp], #((sa_sz + 15) / 16 * 16 + 16)
ret
endfunction
// Main program entry point
// Repeatedly fills every V register with a unique pattern, encourages
// preemption via sched_yield, then verifies the registers survived.
// Runs forever; terminated externally via SIGINT/SIGTERM.
// Register roles: x19 = "VL" bits (fixed 128), x20 = pid,
// x21 = register counter, x22 = generation, x23 = signal count.
.globl _start
function _start
_start:
mov x23, #0 // signal count
mov w0, #SIGINT
adr x1, terminate_handler
mov w2, #SA_SIGINFO
bl setsignal
mov w0, #SIGTERM
adr x1, terminate_handler
mov w2, #SA_SIGINFO
bl setsignal
mov w0, #SIGUSR1
adr x1, irritator_handler
mov w2, #SA_SIGINFO
orr w2, w2, #SA_NODEFER
bl setsignal
mov w0, #SIGUSR2
adr x1, tickle_handler
mov w2, #SA_SIGINFO
orr w2, w2, #SA_NODEFER
bl setsignal
// Sanity-check and report the vector length
// (FPSIMD has no VL instruction; the 128-bit width is hard-wired.)
mov x19, #128
cmp x19, #128
b.lo 1f
cmp x19, #2048
b.hi 1f
tst x19, #(8 - 1)
b.eq 2f
1: puts "Bad vector length: "
mov x0, x19
bl putdecn
b .Labort
2: puts "Vector length:\t"
mov x0, x19
bl putdec
puts " bits\n"
// Obtain our PID, to ensure test pattern uniqueness between processes
mov x8, #__NR_getpid
svc #0
mov x20, x0
puts "PID:\t"
mov x0, x20
bl putdecn
mov x22, #0 // generation number, increments per iteration
.Ltest_loop:
mov x21, #0 // Set up V-regs & shadow with test pattern
0: mov x0, x20
mov x1, x21
and x2, x22, #0xf // generation is a 4-bit field
bl setup_vreg
add x21, x21, #1
cmp x21, #NVR
b.lo 0b
// Can't do this when SVE state is volatile across SVC:
mov x8, #__NR_sched_yield // Encourage preemption
svc #0
mov x21, #0 // Verify everything made it through
0: mov x0, x21
bl check_vreg
add x21, x21, #1
cmp x21, #NVR
b.lo 0b
add x22, x22, #1 // Everything still working
b .Ltest_loop
.Labort:
mov x0, #0
mov x1, #SIGABRT
mov x8, #__NR_kill
svc #0
endfunction
// Report a register data mismatch and exit(1). Reached from memcmp.
// x0: expected data, x1: actual data, x2: length in bytes.
// Does not return.
function barf
mov x10, x0 // expected data
mov x11, x1 // actual data
mov x12, x2 // data size
puts "Mismatch: PID="
mov x0, x20
bl putdec
puts ", iteration="
mov x0, x22
bl putdec
puts ", reg="
mov x0, x21
bl putdecn
puts "\tExpected ["
mov x0, x10
mov x1, x12
bl dumphex
puts "]\n\tGot ["
mov x0, x11
mov x1, x12
bl dumphex
puts "]\n"
mov x8, #__NR_exit
mov x1, #1
svc #0
endfunction
|
aixcc-public/challenge-001-exemplar-source
| 1,255
|
tools/testing/selftests/arm64/fp/fp-pidbench.S
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2021 ARM Limited.
// Original author: Mark Brown <broonie@kernel.org>
//
// Trivial syscall overhead benchmark.
//
// This is implemented in asm to ensure that we don't have any issues with
// system libraries using instructions that disrupt the test.
#include <asm/unistd.h>
#include "assembler.h"
.arch_extension sve
// Time x20 iterations of "\per_loop; svc #0" (getpid) using the generic
// timer counter, then print the elapsed tick count.
// Expects: x20 = iteration count. Clobbers x8, x10-x12 and putdec's regs.
.macro test_loop per_loop
mov x10, x20 // x10 = remaining iterations
mov x8, #__NR_getpid // cheap, side-effect-free syscall
mrs x11, CNTVCT_EL0 // start timestamp
1:
\per_loop
svc #0
sub x10, x10, #1
cbnz x10, 1b
mrs x12, CNTVCT_EL0 // end timestamp
sub x0, x12, x11 // elapsed counter ticks
bl putdec
puts "\n"
.endm
// Main program entry point
// Benchmarks syscall overhead in three states: SVE never used, SVE used
// once beforehand, and SVE used before every syscall.
// x20 = iterations per test (10000 << 8).
.globl _start
function _start
_start:
puts "Iterations per test: "
mov x20, #10000
lsl x20, x20, #8 // 10000 * 256 iterations
mov x0, x20
bl putdec
puts "\n"
// Test having never used SVE
puts "No SVE: "
test_loop
// Check for SVE support - should use hwcap but that's hard in asm
mrs x0, ID_AA64PFR0_EL1
ubfx x0, x0, #32, #4 // SVE field of ID_AA64PFR0_EL1
cbnz x0, 1f
puts "System does not support SVE\n"
b out
1:
// Execute a SVE instruction
puts "SVE VL: "
rdvl x0, #8 // VL in bits
bl putdec
puts "\n"
puts "SVE used once: "
test_loop
// Use SVE per syscall
puts "SVE used per syscall: "
test_loop "rdvl x0, #8"
// And we're done
out:
mov x0, #0
mov x8, #__NR_exit
svc #0
|
aixcc-public/challenge-001-exemplar-source
| 2,831
|
tools/testing/selftests/arm64/fp/asm-utils.S
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2015-2021 ARM Limited.
// Original author: Dave Martin <Dave.Martin@arm.com>
//
// Utility functions for assembly code.
#include <asm/unistd.h>
#include "assembler.h"
// Print a single character x0 to stdout
// Clobbers x0-x2,x8
function putc
str x0, [sp, #-16]! // spill the char so write() can address it
mov x0, #1 // STDOUT_FILENO
mov x1, sp
mov x2, #1
mov x8, #__NR_write
svc #0
add sp, sp, #16
ret
endfunction
.globl putc
// Print a NUL-terminated string starting at address x0 to stdout
// Clobbers x0-x3,x8
function puts
mov x1, x0 // x1 = buffer for write()
mov x2, #0 // x2 = length, counted below
0: ldrb w3, [x0], #1
cbz w3, 1f
add x2, x2, #1
b 0b
1: mov w0, #1 // STDOUT_FILENO
mov x8, #__NR_write
svc #0
ret
endfunction
.globl puts
// Print an unsigned decimal number x0 to stdout
// Clobbers x0-x4,x8
// Builds the digit string backwards on the stack, then calls puts.
function putdec
mov x1, sp
str x30, [sp, #-32]! // Result can't be > 20 digits
mov x2, #0
strb w2, [x1, #-1]! // Write the NUL terminator
mov x2, #10
0: udiv x3, x0, x2 // div-mod loop to generate the digits
msub x0, x3, x2, x0
add w0, w0, #'0'
strb w0, [x1, #-1]!
mov x0, x3
cbnz x3, 0b
ldrb w0, [x1]
cbnz w0, 1f
mov w0, #'0' // Print "0" for 0, not ""
strb w0, [x1, #-1]!
1: mov x0, x1
bl puts
ldr x30, [sp], #32
ret
endfunction
.globl putdec
// Print an unsigned decimal number x0 to stdout, followed by a newline
// Clobbers x0-x5,x8
function putdecn
mov x5, x30 // preserve return address across bl calls
bl putdec
mov x0, #'\n'
bl putc
ret x5
endfunction
.globl putdecn
// Print byte w0 as two lowercase hex digits to stdout.
// Clobbers x0-x3,x8
function puthexb
str x30, [sp, #-0x10]!
mov w3, w0 // keep the low nibble for the fall-through
lsr w0, w0, #4
bl puthexnibble // high nibble first
mov w0, w3
ldr x30, [sp], #0x10
// fall through to puthexnibble
endfunction
.globl puthexb
// Print the low nibble of w0 as one lowercase hex digit to stdout.
// Clobbers x0-x2,x8
function puthexnibble
and w0, w0, #0xf
cmp w0, #10
blo 1f
add w0, w0, #'a' - ('9' + 1) // shift 10..15 into 'a'..'f'
1: add w0, w0, #'0'
b putc // tail call
endfunction
.globl puthexnibble
// Hex-dump x1 bytes starting at x0 to stdout (no separators).
// x0=data in, x1=size in, clobbers x0-x5,x8
function dumphex
str x30, [sp, #-0x10]!
mov x4, x0 // x4 = cursor
mov x5, x1 // x5 = bytes remaining
0: subs x5, x5, #1
b.lo 1f
ldrb w0, [x4], #1
bl puthexb
b 0b
1: ldr x30, [sp], #0x10
ret
endfunction
.globl dumphex
// Trivial memory copy: copy x2 bytes, starting at address x1, to address x0.
// Clobbers x0-x3
function memcpy
cmp x2, #0
b.eq 1f // zero length: nothing to copy
0: ldrb w3, [x1], #1 // byte-at-a-time copy, post-increment both pointers
strb w3, [x0], #1
subs x2, x2, #1
b.ne 0b
1: ret
endfunction
.globl memcpy
// Fill x1 bytes starting at x0 with 0xae (for canary purposes)
// Clobbers x1, x2.
function memfill_ae
mov w2, #0xae // recognizable poison byte
b memfill // tail call
endfunction
.globl memfill_ae
// Fill x1 bytes starting at x0 with 0.
// Clobbers x1, x2.
// NB: no ret here -- deliberately falls through into memfill below
// (the `function`/`endfunction` markers do not emit any code).
function memclr
mov w2, #0
endfunction
.globl memclr
// fall through to memfill
// Trivial memory fill: fill x1 bytes starting at address x0 with byte w2
// Clobbers x1
// (x0 is also advanced past the filled region.)
function memfill
cmp x1, #0
b.eq 1f // zero length: nothing to fill
0: strb w2, [x0], #1
subs x1, x1, #1
b.ne 0b
1: ret
endfunction
.globl memfill
|
aixcc-public/challenge-001-exemplar-source
| 7,284
|
tools/testing/selftests/arm64/abi/syscall-abi-asm.S
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2021 ARM Limited.
//
// Assembly portion of the syscall ABI test
//
// Load values from memory into registers, invoke a syscall and save the
// register values back to memory for later checking. The syscall to be
// invoked is configured in x8 of the input GPR data.
//
// x0: SVE VL, 0 for FP only
// x1: SME VL
//
// GPRs: gpr_in, gpr_out
// FPRs: fpr_in, fpr_out
// Zn: z_in, z_out
// Pn: p_in, p_out
// FFR: ffr_in, ffr_out
// ZA: za_in, za_out
// SVCR: svcr_in, svcr_out
#include "syscall-abi.h"
.arch_extension sve
/*
 * LDR (vector to ZA array):
 * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
 *
 * Hand-encoded because the assembler may predate SME: fields are the
 * low 2 bits of the Wn select register, the Xn base, and a 3-bit
 * vector offset.
 */
.macro _ldr_za nw, nxbase, offset=0
.inst 0xe1000000 \
| (((\nw) & 3) << 13) \
| ((\nxbase) << 5) \
| ((\offset) & 7)
.endm
/*
 * STR (vector from ZA array):
 * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
 */
.macro _str_za nw, nxbase, offset=0
.inst 0xe1200000 \
| (((\nw) & 3) << 13) \
| ((\nxbase) << 5) \
| ((\offset) & 7)
.endm
.globl do_syscall
// do_syscall(sve_vl /* x0 */, sme_vl /* x1 */)
// Loads GPR/FPR/SVE/ZA register state from the *_in buffers, performs
// the syscall configured in gpr_in's x8 slot, then dumps the resulting
// register state to the *_out buffers for the C harness to check.
do_syscall:
// Store callee saved registers x19-x29 (80 bytes) plus x0 and x1
stp x29, x30, [sp, #-112]!
mov x29, sp
stp x0, x1, [sp, #16]
stp x19, x20, [sp, #32]
stp x21, x22, [sp, #48]
stp x23, x24, [sp, #64]
stp x25, x26, [sp, #80]
stp x27, x28, [sp, #96]
// Set SVCR if we're doing SME
cbz x1, 1f
adrp x2, svcr_in
ldr x2, [x2, :lo12:svcr_in]
msr S3_3_C4_C2_2, x2 // SVCR
1:
// Load ZA if it's enabled - uses x12 as scratch due to SME LDR
// NOTE(review): if x1 == 0 the branch above skipped the SVCR load, so
// x2 is stale here and this tbz tests garbage -- confirm callers only
// care about ZA when x1 != 0.
tbz x2, #SVCR_ZA_SHIFT, 1f
mov w12, #0 // w12 = ZA row index
ldr x2, =za_in
2: _ldr_za 12, 2
add x2, x2, x1 // advance one SVL row
add x12, x12, #1
cmp x1, x12
bne 2b
1:
// Load GPRs x8-x28, and save our SP/FP for later comparison
ldr x2, =gpr_in
add x2, x2, #64 // skip the x0-x7 slots
ldp x8, x9, [x2], #16
ldp x10, x11, [x2], #16
ldp x12, x13, [x2], #16
ldp x14, x15, [x2], #16
ldp x16, x17, [x2], #16
ldp x18, x19, [x2], #16
ldp x20, x21, [x2], #16
ldp x22, x23, [x2], #16
ldp x24, x25, [x2], #16
ldp x26, x27, [x2], #16
ldr x28, [x2], #8
str x29, [x2], #8 // FP
str x30, [x2], #8 // LR
// Load FPRs if we're not doing SVE
cbnz x0, 1f
ldr x2, =fpr_in
ldp q0, q1, [x2]
ldp q2, q3, [x2, #16 * 2]
ldp q4, q5, [x2, #16 * 4]
ldp q6, q7, [x2, #16 * 6]
ldp q8, q9, [x2, #16 * 8]
ldp q10, q11, [x2, #16 * 10]
ldp q12, q13, [x2, #16 * 12]
ldp q14, q15, [x2, #16 * 14]
ldp q16, q17, [x2, #16 * 16]
ldp q18, q19, [x2, #16 * 18]
ldp q20, q21, [x2, #16 * 20]
ldp q22, q23, [x2, #16 * 22]
ldp q24, q25, [x2, #16 * 24]
ldp q26, q27, [x2, #16 * 26]
ldp q28, q29, [x2, #16 * 28]
ldp q30, q31, [x2, #16 * 30]
1:
// Load the SVE registers if we're doing SVE/SME
cbz x0, 1f
ldr x2, =z_in
ldr z0, [x2, #0, MUL VL]
ldr z1, [x2, #1, MUL VL]
ldr z2, [x2, #2, MUL VL]
ldr z3, [x2, #3, MUL VL]
ldr z4, [x2, #4, MUL VL]
ldr z5, [x2, #5, MUL VL]
ldr z6, [x2, #6, MUL VL]
ldr z7, [x2, #7, MUL VL]
ldr z8, [x2, #8, MUL VL]
ldr z9, [x2, #9, MUL VL]
ldr z10, [x2, #10, MUL VL]
ldr z11, [x2, #11, MUL VL]
ldr z12, [x2, #12, MUL VL]
ldr z13, [x2, #13, MUL VL]
ldr z14, [x2, #14, MUL VL]
ldr z15, [x2, #15, MUL VL]
ldr z16, [x2, #16, MUL VL]
ldr z17, [x2, #17, MUL VL]
ldr z18, [x2, #18, MUL VL]
ldr z19, [x2, #19, MUL VL]
ldr z20, [x2, #20, MUL VL]
ldr z21, [x2, #21, MUL VL]
ldr z22, [x2, #22, MUL VL]
ldr z23, [x2, #23, MUL VL]
ldr z24, [x2, #24, MUL VL]
ldr z25, [x2, #25, MUL VL]
ldr z26, [x2, #26, MUL VL]
ldr z27, [x2, #27, MUL VL]
ldr z28, [x2, #28, MUL VL]
ldr z29, [x2, #29, MUL VL]
ldr z30, [x2, #30, MUL VL]
ldr z31, [x2, #31, MUL VL]
// Only set a non-zero FFR, test patterns must be zero since the
// syscall should clear it - this lets us handle FA64.
ldr x2, =ffr_in
ldr p0, [x2, #0]
ldr x2, [x2, #0] // first 8 bytes double as an "FFR valid" flag
cbz x2, 2f
wrffr p0.b
2:
ldr x2, =p_in
ldr p0, [x2, #0, MUL VL]
ldr p1, [x2, #1, MUL VL]
ldr p2, [x2, #2, MUL VL]
ldr p3, [x2, #3, MUL VL]
ldr p4, [x2, #4, MUL VL]
ldr p5, [x2, #5, MUL VL]
ldr p6, [x2, #6, MUL VL]
ldr p7, [x2, #7, MUL VL]
ldr p8, [x2, #8, MUL VL]
ldr p9, [x2, #9, MUL VL]
ldr p10, [x2, #10, MUL VL]
ldr p11, [x2, #11, MUL VL]
ldr p12, [x2, #12, MUL VL]
ldr p13, [x2, #13, MUL VL]
ldr p14, [x2, #14, MUL VL]
ldr p15, [x2, #15, MUL VL]
1:
// Do the syscall
svc #0
// Save GPRs x8-x30
ldr x2, =gpr_out
add x2, x2, #64 // skip the x0-x7 slots
stp x8, x9, [x2], #16
stp x10, x11, [x2], #16
stp x12, x13, [x2], #16
stp x14, x15, [x2], #16
stp x16, x17, [x2], #16
stp x18, x19, [x2], #16
stp x20, x21, [x2], #16
stp x22, x23, [x2], #16
stp x24, x25, [x2], #16
stp x26, x27, [x2], #16
stp x28, x29, [x2], #16
str x30, [x2]
// Restore x0 and x1 for feature checks
ldp x0, x1, [sp, #16]
// Save FPSIMD state
ldr x2, =fpr_out
stp q0, q1, [x2]
stp q2, q3, [x2, #16 * 2]
stp q4, q5, [x2, #16 * 4]
stp q6, q7, [x2, #16 * 6]
stp q8, q9, [x2, #16 * 8]
stp q10, q11, [x2, #16 * 10]
stp q12, q13, [x2, #16 * 12]
stp q14, q15, [x2, #16 * 14]
stp q16, q17, [x2, #16 * 16]
stp q18, q19, [x2, #16 * 18]
stp q20, q21, [x2, #16 * 20]
stp q22, q23, [x2, #16 * 22]
stp q24, q25, [x2, #16 * 24]
stp q26, q27, [x2, #16 * 26]
stp q28, q29, [x2, #16 * 28]
stp q30, q31, [x2, #16 * 30]
// Save SVCR if we're doing SME
cbz x1, 1f
mrs x2, S3_3_C4_C2_2 // SVCR
adrp x3, svcr_out
str x2, [x3, :lo12:svcr_out]
1:
// Save ZA if it's enabled - uses x12 as scratch due to SME STR
// NOTE(review): as above, x2 only holds SVCR when x1 != 0.
tbz x2, #SVCR_ZA_SHIFT, 1f
mov w12, #0 // w12 = ZA row index
ldr x2, =za_out
2: _str_za 12, 2
add x2, x2, x1 // advance one SVL row
add x12, x12, #1
cmp x1, x12
bne 2b
1:
// Save the SVE state if we have some
cbz x0, 1f
ldr x2, =z_out
str z0, [x2, #0, MUL VL]
str z1, [x2, #1, MUL VL]
str z2, [x2, #2, MUL VL]
str z3, [x2, #3, MUL VL]
str z4, [x2, #4, MUL VL]
str z5, [x2, #5, MUL VL]
str z6, [x2, #6, MUL VL]
str z7, [x2, #7, MUL VL]
str z8, [x2, #8, MUL VL]
str z9, [x2, #9, MUL VL]
str z10, [x2, #10, MUL VL]
str z11, [x2, #11, MUL VL]
str z12, [x2, #12, MUL VL]
str z13, [x2, #13, MUL VL]
str z14, [x2, #14, MUL VL]
str z15, [x2, #15, MUL VL]
str z16, [x2, #16, MUL VL]
str z17, [x2, #17, MUL VL]
str z18, [x2, #18, MUL VL]
str z19, [x2, #19, MUL VL]
str z20, [x2, #20, MUL VL]
str z21, [x2, #21, MUL VL]
str z22, [x2, #22, MUL VL]
str z23, [x2, #23, MUL VL]
str z24, [x2, #24, MUL VL]
str z25, [x2, #25, MUL VL]
str z26, [x2, #26, MUL VL]
str z27, [x2, #27, MUL VL]
str z28, [x2, #28, MUL VL]
str z29, [x2, #29, MUL VL]
str z30, [x2, #30, MUL VL]
str z31, [x2, #31, MUL VL]
ldr x2, =p_out
str p0, [x2, #0, MUL VL]
str p1, [x2, #1, MUL VL]
str p2, [x2, #2, MUL VL]
str p3, [x2, #3, MUL VL]
str p4, [x2, #4, MUL VL]
str p5, [x2, #5, MUL VL]
str p6, [x2, #6, MUL VL]
str p7, [x2, #7, MUL VL]
str p8, [x2, #8, MUL VL]
str p9, [x2, #9, MUL VL]
str p10, [x2, #10, MUL VL]
str p11, [x2, #11, MUL VL]
str p12, [x2, #12, MUL VL]
str p13, [x2, #13, MUL VL]
str p14, [x2, #14, MUL VL]
str p15, [x2, #15, MUL VL]
// Only save FFR if we wrote a value for SME
ldr x2, =ffr_in
ldr x2, [x2, #0]
cbz x2, 1f
ldr x2, =ffr_out
rdffr p0.b
str p0, [x2, #0]
1:
// Restore callee saved registers x19-x30
ldp x19, x20, [sp, #32]
ldp x21, x22, [sp, #48]
ldp x23, x24, [sp, #64]
ldp x25, x26, [sp, #80]
ldp x27, x28, [sp, #96]
ldp x29, x30, [sp], #112
// Clear SVCR if we were doing SME so future tests don't have ZA
cbz x1, 1f
msr S3_3_C4_C2_2, xzr
1:
ret
|
aixcc-public/challenge-001-exemplar-source
| 1,687
|
tools/testing/selftests/arm64/signal/signals.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2019 ARM Limited */
#include <asm/unistd.h>
.section .rodata, "a"
call_fmt:
.asciz "Calling sigreturn with fake sigframe sized:%zd at SP @%08lX\n"
.text
.globl fake_sigreturn
/* fake_sigreturn x0:&sigframe, x1:sigframe_size, x2:misalign_bytes */
/* Builds a caller-supplied sigframe on the stack (optionally misaligned
 * by x2 bytes) and invokes rt_sigreturn on it. Never returns. */
fake_sigreturn:
stp x29, x30, [sp, #-16]!
mov x29, sp
mov x20, x0 /* x20 = source sigframe */
mov x21, x1 /* x21 = sigframe size */
mov x22, x2 /* x22 = requested misalignment */
/* create space on the stack for fake sigframe 16 bytes-aligned */
add x0, x21, x22
add x0, x0, #15
bic x0, x0, #15 /* round_up(sigframe_size + misalign_bytes, 16) */
sub sp, sp, x0
add x23, sp, x22 /* new sigframe base with misaligment if any */
ldr x0, =call_fmt
mov x1, x21
mov x2, x23
bl printf
/* memcpy the provided content, while still keeping SP aligned */
mov x0, x23
mov x1, x20
mov x2, x21
bl memcpy
/*
 * Here saving a last minute SP to current->token acts as a marker:
 * if we got here, we are successfully faking a sigreturn; in other
 * words we are sure no bad fatal signal has been raised till now
 * for unrelated reasons, so we should consider the possibly observed
 * fatal signal like SEGV coming from Kernel restore_sigframe() and
 * triggered as expected from our test-case.
 * For simplicity this assumes that current field 'token' is laid out
 * as first in struct tdescr
 */
ldr x0, current
str x23, [x0]
/* finally move SP to misaligned address...if any requested */
mov sp, x23
mov x8, #__NR_rt_sigreturn
svc #0
/*
 * Above sigreturn should not return...looping here leads to a timeout
 * and ensure proper and clean test failure, instead of jumping around
 * on a potentially corrupted stack.
 */
b .
|
aixcc-public/challenge-001-exemplar-source
| 1,294
|
tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* test helper assembly functions
*
* Copyright (C) 2016 Simon Guo, IBM Corporation.
* Copyright 2022 Michael Ellerman, IBM Corporation.
*/
#include "basic_asm.h"
#define GPR_SIZE __SIZEOF_LONG__
#define FIRST_GPR 14
#define NUM_GPRS (32 - FIRST_GPR)
#define STACK_SIZE (NUM_GPRS * GPR_SIZE)
// gpr_child_loop(int *read_flag, int *write_flag,
// unsigned long *gpr_buf, double *fpr_buf);
// Child side of the ptrace GPR/FPR test: seed the non-volatile GPRs and all
// FPRs with known values from the caller's buffers, signal readiness through
// *write_flag, spin until the parent tracer sets *read_flag, then dump the
// (possibly tracer-modified) register contents back to the same buffers.
FUNC_START(gpr_child_loop)
// r3 = read_flag
// r4 = write_flag
// r5 = gpr_buf
// r6 = fpr_buf
PUSH_BASIC_STACK(STACK_SIZE)
// Save non-volatile GPRs (r14-r31) so we can restore the ABI state later
OP_REGS PPC_STL, GPR_SIZE, FIRST_GPR, 31, %r1, STACK_FRAME_LOCAL(0, 0), FIRST_GPR
// Load GPRs with expected values
OP_REGS PPC_LL, GPR_SIZE, FIRST_GPR, 31, r5, 0, FIRST_GPR
// Load FPRs with expected values
OP_REGS lfd, 8, 0, 31, r6
// Signal to parent that we're ready
li r0, 1
stw r0, 0(r4)
// Wait for parent to finish
1: lwz r0, 0(r3)
cmpwi r0, 0
beq 1b // Loop while flag is zero
// Save GPRs back to caller buffer
OP_REGS PPC_STL, GPR_SIZE, FIRST_GPR, 31, r5, 0, FIRST_GPR
// Save FPRs
OP_REGS stfd, 8, 0, 31, r6
// Reload non-volatile GPRs
OP_REGS PPC_LL, GPR_SIZE, FIRST_GPR, 31, %r1, STACK_FRAME_LOCAL(0, 0), FIRST_GPR
POP_BASIC_STACK(STACK_SIZE)
blr
|
aixcc-public/challenge-001-exemplar-source
| 10,214
|
tools/testing/selftests/powerpc/copyloops/memcpy_power7.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* Copyright (C) IBM Corporation, 2012
*
* Author: Anton Blanchard <anton@au.ibm.com>
*/
#include <asm/ppc_asm.h>
#ifndef SELFTEST_CASE
/* 0 == don't use VMX, 1 == use VMX */
#define SELFTEST_CASE 0
#endif
#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
#else
#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
#endif
/*
 * memcpy_power7(dest, src, n): POWER7-optimised memcpy.
 * In: r3 = dest, r4 = src, r5 = byte count.
 * The original dest is saved at entry and reloaded into r3 before returning.
 * Copies larger than 4096 bytes may take the VMX (ALTIVEC) path when the
 * CPU feature section selects it; otherwise the scalar .Lnonvmx_copy path
 * aligns the source to 8B and copies in cacheline (128B) blocks.
 */
_GLOBAL(memcpy_power7)
cmpldi r5,16
cmpldi cr1,r5,4096
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
blt .Lshort_copy
#ifdef CONFIG_ALTIVEC
test_feature = SELFTEST_CASE
BEGIN_FTR_SECTION
bgt cr1, .Lvmx_copy
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
.Lnonvmx_copy:
/* Get the source 8B aligned */
neg r6,r4
mtocrf 0x01,r6
clrldi r6,r6,(64-3)
bf cr7*4+3,1f
lbz r0,0(r4)
addi r4,r4,1
stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
3: sub r5,r5,r6
cmpldi r5,128
blt 5f
/* >= 128B left: open a frame and save r14-r22 for the unrolled loop */
mflr r0
stdu r1,-STACKFRAMESIZE(r1)
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
std r17,STK_REG(R17)(r1)
std r18,STK_REG(R18)(r1)
std r19,STK_REG(R19)(r1)
std r20,STK_REG(R20)(r1)
std r21,STK_REG(R21)(r1)
std r22,STK_REG(R22)(r1)
std r0,STACKFRAMESIZE+16(r1)
srdi r6,r5,7
mtctr r6
/* Now do cacheline (128B) sized loads and stores. */
.align 5
4:
ld r0,0(r4)
ld r6,8(r4)
ld r7,16(r4)
ld r8,24(r4)
ld r9,32(r4)
ld r10,40(r4)
ld r11,48(r4)
ld r12,56(r4)
ld r14,64(r4)
ld r15,72(r4)
ld r16,80(r4)
ld r17,88(r4)
ld r18,96(r4)
ld r19,104(r4)
ld r20,112(r4)
ld r21,120(r4)
addi r4,r4,128
std r0,0(r3)
std r6,8(r3)
std r7,16(r3)
std r8,24(r3)
std r9,32(r3)
std r10,40(r3)
std r11,48(r3)
std r12,56(r3)
std r14,64(r3)
std r15,72(r3)
std r16,80(r3)
std r17,88(r3)
std r18,96(r3)
std r19,104(r3)
std r20,112(r3)
std r21,120(r3)
addi r3,r3,128
bdnz 4b
clrldi r5,r5,(64-7)
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
ld r17,STK_REG(R17)(r1)
ld r18,STK_REG(R18)(r1)
ld r19,STK_REG(R19)(r1)
ld r20,STK_REG(R20)(r1)
ld r21,STK_REG(R21)(r1)
ld r22,STK_REG(R22)(r1)
addi r1,r1,STACKFRAMESIZE
/* Up to 127B to go */
5: srdi r6,r5,4
mtocrf 0x01,r6
6: bf cr7*4+1,7f
ld r0,0(r4)
ld r6,8(r4)
ld r7,16(r4)
ld r8,24(r4)
ld r9,32(r4)
ld r10,40(r4)
ld r11,48(r4)
ld r12,56(r4)
addi r4,r4,64
std r0,0(r3)
std r6,8(r3)
std r7,16(r3)
std r8,24(r3)
std r9,32(r3)
std r10,40(r3)
std r11,48(r3)
std r12,56(r3)
addi r3,r3,64
/* Up to 63B to go */
7: bf cr7*4+2,8f
ld r0,0(r4)
ld r6,8(r4)
ld r7,16(r4)
ld r8,24(r4)
addi r4,r4,32
std r0,0(r3)
std r6,8(r3)
std r7,16(r3)
std r8,24(r3)
addi r3,r3,32
/* Up to 31B to go */
8: bf cr7*4+3,9f
ld r0,0(r4)
ld r6,8(r4)
addi r4,r4,16
std r0,0(r3)
std r6,8(r3)
addi r3,r3,16
9: clrldi r5,r5,(64-4)
/* Up to 15B to go */
.Lshort_copy:
mtocrf 0x01,r5
bf cr7*4+0,12f
lwz r0,0(r4) /* Less chance of a reject with word ops */
lwz r6,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r6,4(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
lbz r0,0(r4)
stb r0,0(r3)
15: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
blr
.Lunwind_stack_nonvmx_copy:
addi r1,r1,STACKFRAMESIZE
b .Lnonvmx_copy
.Lvmx_copy:
#ifdef CONFIG_ALTIVEC
mflr r0
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl enter_vmx_ops
cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
ld r4,STK_REG(R30)(r1)
ld r5,STK_REG(R29)(r1)
mtlr r0
/*
 * We prefetch both the source and destination using enhanced touch
 * instructions. We use a stream ID of 0 for the load side and
 * 1 for the store side.
 */
clrrdi r6,r4,7
clrrdi r9,r3,7
ori r9,r9,1 /* stream=1 */
srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
cmpldi r7,0x3FF
ble 1f
li r7,0x3FF
1: lis r0,0x0E00 /* depth=7 */
sldi r7,r7,7
or r7,r7,r0
ori r10,r7,1 /* stream=1 */
lis r8,0x8000 /* GO=1 */
clrldi r8,r8,32
/* setup read stream 0 */
dcbt 0,r6,0b01000
dcbt 0,r7,0b01010
/* setup write stream 1 */
dcbtst 0,r9,0b01000
dcbtst 0,r10,0b01010
eieio
dcbt 0,r8,0b01010 /* GO */
beq cr1,.Lunwind_stack_nonvmx_copy
/*
 * If source and destination are not relatively aligned we use a
 * slower permute loop.
 */
xor r6,r4,r3
rldicl. r6,r6,0,(64-4)
bne .Lvmx_unaligned_copy
/* Get the destination 16B aligned */
neg r6,r3
mtocrf 0x01,r6
clrldi r6,r6,(64-4)
bf cr7*4+3,1f
lbz r0,0(r4)
addi r4,r4,1
stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
3: bf cr7*4+0,4f
ld r0,0(r4)
addi r4,r4,8
std r0,0(r3)
addi r3,r3,8
4: sub r5,r5,r6
/* Get the destination 128B aligned */
neg r6,r3
srdi r7,r6,4
mtocrf 0x01,r7
clrldi r6,r6,(64-7)
li r9,16
li r10,32
li r11,48
bf cr7*4+3,5f
lvx v1,0,r4
addi r4,r4,16
stvx v1,0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
lvx v1,0,r4
lvx v0,r4,r9
addi r4,r4,32
stvx v1,0,r3
stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
lvx v3,0,r4
lvx v2,r4,r9
lvx v1,r4,r10
lvx v0,r4,r11
addi r4,r4,64
stvx v3,0,r3
stvx v2,r3,r9
stvx v1,r3,r10
stvx v0,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
srdi r6,r5,7
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
li r12,64
li r14,80
li r15,96
li r16,112
mtctr r6
/*
 * Now do cacheline sized loads and stores. By this stage the
 * cacheline stores are also cacheline aligned.
 */
.align 5
8:
lvx v7,0,r4
lvx v6,r4,r9
lvx v5,r4,r10
lvx v4,r4,r11
lvx v3,r4,r12
lvx v2,r4,r14
lvx v1,r4,r15
lvx v0,r4,r16
addi r4,r4,128
stvx v7,0,r3
stvx v6,r3,r9
stvx v5,r3,r10
stvx v4,r3,r11
stvx v3,r3,r12
stvx v2,r3,r14
stvx v1,r3,r15
stvx v0,r3,r16
addi r3,r3,128
bdnz 8b
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
/* Up to 127B to go */
clrldi r5,r5,(64-7)
srdi r6,r5,4
mtocrf 0x01,r6
bf cr7*4+1,9f
lvx v3,0,r4
lvx v2,r4,r9
lvx v1,r4,r10
lvx v0,r4,r11
addi r4,r4,64
stvx v3,0,r3
stvx v2,r3,r9
stvx v1,r3,r10
stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
lvx v1,0,r4
lvx v0,r4,r9
addi r4,r4,32
stvx v1,0,r3
stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
lvx v1,0,r4
addi r4,r4,16
stvx v1,0,r3
addi r3,r3,16
/* Up to 15B to go */
11: clrldi r5,r5,(64-4)
mtocrf 0x01,r5
bf cr7*4+0,12f
ld r0,0(r4)
addi r4,r4,8
std r0,0(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
lbz r0,0(r4)
stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
b exit_vmx_ops /* tail call optimise */
/*
 * Source and destination differ in their 16B alignment: copy 16B blocks
 * from aligned addresses and stitch the result with vperm through the
 * control vector set up by LVS below.
 */
.Lvmx_unaligned_copy:
/* Get the destination 16B aligned */
neg r6,r3
mtocrf 0x01,r6
clrldi r6,r6,(64-4)
bf cr7*4+3,1f
lbz r0,0(r4)
addi r4,r4,1
stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
3: bf cr7*4+0,4f
lwz r0,0(r4) /* Less chance of a reject with word ops */
lwz r7,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r7,4(r3)
addi r3,r3,8
4: sub r5,r5,r6
/* Get the destination 128B aligned */
neg r6,r3
srdi r7,r6,4
mtocrf 0x01,r7
clrldi r6,r6,(64-7)
li r9,16
li r10,32
li r11,48
LVS(v16,0,r4) /* Setup permute control vector */
lvx v0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
stvx v8,0,r3
addi r3,r3,16
vor v0,v1,v1
5: bf cr7*4+2,6f
lvx v1,0,r4
VPERM(v8,v0,v1,v16)
lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
stvx v8,0,r3
stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
lvx v3,0,r4
VPERM(v8,v0,v3,v16)
lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
lvx v1,r4,r10
VPERM(v10,v2,v1,v16)
lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
srdi r6,r5,7
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
li r12,64
li r14,80
li r15,96
li r16,112
mtctr r6
/*
 * Now do cacheline sized loads and stores. By this stage the
 * cacheline stores are also cacheline aligned.
 */
.align 5
8:
lvx v7,0,r4
VPERM(v8,v0,v7,v16)
lvx v6,r4,r9
VPERM(v9,v7,v6,v16)
lvx v5,r4,r10
VPERM(v10,v6,v5,v16)
lvx v4,r4,r11
VPERM(v11,v5,v4,v16)
lvx v3,r4,r12
VPERM(v12,v4,v3,v16)
lvx v2,r4,r14
VPERM(v13,v3,v2,v16)
lvx v1,r4,r15
VPERM(v14,v2,v1,v16)
lvx v0,r4,r16
VPERM(v15,v1,v0,v16)
addi r4,r4,128
stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
stvx v12,r3,r12
stvx v13,r3,r14
stvx v14,r3,r15
stvx v15,r3,r16
addi r3,r3,128
bdnz 8b
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
/* Up to 127B to go */
clrldi r5,r5,(64-7)
srdi r6,r5,4
mtocrf 0x01,r6
bf cr7*4+1,9f
lvx v3,0,r4
VPERM(v8,v0,v3,v16)
lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
lvx v1,r4,r10
VPERM(v10,v2,v1,v16)
lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
lvx v1,0,r4
VPERM(v8,v0,v1,v16)
lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
stvx v8,0,r3
stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
stvx v8,0,r3
addi r3,r3,16
/* Up to 15B to go */
11: clrldi r5,r5,(64-4)
addi r4,r4,-16 /* Unwind the +16 load offset */
mtocrf 0x01,r5
bf cr7*4+0,12f
lwz r0,0(r4) /* Less chance of a reject with word ops */
lwz r6,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r6,4(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
lbz r0,0(r4)
stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
b exit_vmx_ops /* tail call optimise */
#endif /* CONFIG_ALTIVEC */
|
aixcc-public/challenge-001-exemplar-source
| 4,272
|
tools/testing/selftests/powerpc/copyloops/memcpy_64.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/kasan.h>
#ifndef SELFTEST_CASE
/* For big-endian, 0 == most CPUs, 1 == POWER6, 2 == Cell */
#define SELFTEST_CASE 0
#endif
.align 7
/*
 * memcpy(dest, src, n) for 64-bit powerpc.
 * In: r3 = dest, r4 = src, r5 = byte count.  Returns dest in r3.
 * The big-endian path saves r3 at entry and reloads it before every blr;
 * little-endian builds use a simple byte loop (replaced at runtime per
 * the comment below).  CPUs with CPU_FTR_VMX_COPY may branch to
 * memcpy_power7 instead.
 */
_GLOBAL_TOC_KASAN(memcpy)
BEGIN_FTR_SECTION
#ifdef __LITTLE_ENDIAN__
cmpdi cr7,r5,0
#else
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* save destination pointer for return value */
#endif
FTR_SECTION_ELSE
#ifdef CONFIG_PPC_BOOK3S_64
b memcpy_power7
#endif
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
#ifdef __LITTLE_ENDIAN__
/* dumb little-endian memcpy that will get replaced at runtime */
addi r9,r3,-1
addi r4,r4,-1
beqlr cr7
mtctr r5
1: lbzu r10,1(r4)
stbu r10,1(r9)
bdnz 1b
blr
#else
PPC_MTOCRF(0x01,r5)
cmpldi cr1,r5,16
neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
andi. r6,r6,7
dcbt 0,r4
blt cr1,.Lshort_copy
/* Below we want to nop out the bne if we're on a CPU that has the
 CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
 cleared.
 At the time of writing the only CPU that has this combination of bits
 set is Power6. */
test_feature = (SELFTEST_CASE == 1)
BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
bne .Ldst_unaligned
ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
 CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
addi r3,r3,-16
test_feature = (SELFTEST_CASE == 0)
BEGIN_FTR_SECTION
andi. r0,r4,7
bne .Lsrc_unaligned
END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
# both dest and src 8B aligned: software-pipelined 16B-per-iteration loop
srdi r7,r5,4
ld r9,0(r4)
addi r4,r4,-8
mtctr r7
andi. r5,r5,7
bf cr7*4+0,2f
addi r3,r3,8
addi r4,r4,8
mr r8,r9
blt cr1,3f
1: ld r9,8(r4)
std r8,8(r3)
2: ldu r8,16(r4)
stdu r9,16(r3)
bdnz 1b
3: std r8,8(r3)
beq 3f
addi r3,r3,16
.Ldo_tail:
# final 0-7 bytes, sizes encoded in cr7 bits
bf cr7*4+1,1f
lwz r9,8(r4)
addi r4,r4,4
stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
lhz r9,8(r4)
addi r4,r4,2
sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
lbz r9,8(r4)
stb r9,0(r3)
3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr
.Lsrc_unaligned:
# src not 8B aligned relative to dest: double-word shift-and-merge copy
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
srdi r7,r5,4
sldi r10,r0,3
cmpdi cr6,r6,3
andi. r5,r5,7
mtctr r7
subfic r11,r10,64
add r5,r5,r0
bt cr7*4+0,0f
ld r9,0(r4) # 3+2n loads, 2+2n stores
ld r0,8(r4)
sld r6,r9,r10
ldu r9,16(r4)
srd r7,r0,r11
sld r8,r0,r10
or r7,r7,r6
blt cr6,4f
ld r0,8(r4)
# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
b 2f
0: ld r0,0(r4) # 4+2n loads, 3+2n stores
ldu r9,8(r4)
sld r8,r0,r10
addi r3,r3,-8
blt cr6,5f
ld r0,8(r4)
srd r12,r9,r11
sld r6,r9,r10
ldu r9,16(r4)
or r12,r8,r12
srd r7,r0,r11
sld r8,r0,r10
addi r3,r3,16
beq cr6,3f
# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1: or r7,r7,r6
ld r0,8(r4)
std r12,8(r3)
2: srd r12,r9,r11
sld r6,r9,r10
ldu r9,16(r4)
or r12,r8,r12
stdu r7,16(r3)
srd r7,r0,r11
sld r8,r0,r10
bdnz 1b
3: std r12,8(r3)
or r7,r7,r6
4: std r7,16(r3)
5: srd r12,r9,r11
or r12,r8,r12
std r12,24(r3)
beq 4f
cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
ble cr1,6f
ld r0,8(r4)
srd r7,r0,r11
or r9,r7,r9
6:
bf cr7*4+1,1f
rotldi r9,r9,32
stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
rotldi r9,r9,16
sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
rotldi r9,r9,8
stb r9,0(r3)
3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr
.Ldst_unaligned:
PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
bf cr7*4+3,1f
lbz r0,0(r4)
stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
lhzx r0,r7,r4
sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
lwzx r0,r7,r4
stwx r0,r7,r3
3: PPC_MTOCRF(0x01,r5)
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
# < 16 bytes total: straight-line word/half/byte moves per cr7 bits
bf cr7*4+0,1f
lwz r0,0(r4)
lwz r9,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
lbz r0,0(r4)
stb r0,0(r3)
4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr
#endif
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL_KASAN(memcpy)
|
aixcc-public/challenge-001-exemplar-source
| 2,258
|
tools/testing/selftests/powerpc/copyloops/mem_64.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* String handling functions for PowerPC.
*
* Copyright (C) 1996 Paul Mackerras.
*/
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/kasan.h>
#ifndef CONFIG_KASAN
/*
 * __memset16/__memset32/__memset64: memset variants for 2/4/8-byte
 * patterns.  In: r3 = dest, r4 = pattern, r5 = byte count.
 * Each entry widens the pattern to fill all 64 bits of r4, then joins
 * the common fill loop at .Lms (in memset below).
 */
_GLOBAL(__memset16)
rlwimi r4,r4,16,0,15 /* replicate halfword into the full word */
/* fall through */
_GLOBAL(__memset32)
rldimi r4,r4,32,0 /* replicate word into the full doubleword */
/* fall through */
_GLOBAL(__memset64)
neg r0,r3
andi. r0,r0,7 /* r0 = bytes to next 8-byte dest boundary */
cmplw cr1,r5,r0
b .Lms
EXPORT_SYMBOL(__memset16)
EXPORT_SYMBOL(__memset32)
EXPORT_SYMBOL(__memset64)
#endif
/*
 * memset(dest, c, n): fill n bytes at dest with byte c.
 * In: r3 = dest, r4 = fill byte, r5 = byte count.  r3 is left untouched
 * (it is the return value); r6 is the running store pointer.
 * Strategy: align to 8B, blast 64B blocks, then progressively smaller
 * stores selected via CR bits.
 */
_GLOBAL_KASAN(memset)
neg r0,r3
rlwimi r4,r4,8,16,23
andi. r0,r0,7 /* # bytes to be 8-byte aligned */
rlwimi r4,r4,16,0,15
cmplw cr1,r5,r0 /* do we get that far? */
rldimi r4,r4,32,0 /* byte pattern now replicated across all 8 bytes */
.Lms: PPC_MTOCRF(1,r0)
mr r6,r3
blt cr1,8f
beq 3f /* if already 8-byte aligned */
subf r5,r0,r5
bf 31,1f
stb r4,0(r6)
addi r6,r6,1
1: bf 30,2f
sth r4,0(r6)
addi r6,r6,2
2: bf 29,3f
stw r4,0(r6)
addi r6,r6,4
/* bulk: 64 bytes per iteration */
3: srdi. r0,r5,6
clrldi r5,r5,58
mtctr r0
beq 5f
.balign 16
4: std r4,0(r6)
std r4,8(r6)
std r4,16(r6)
std r4,24(r6)
std r4,32(r6)
std r4,40(r6)
std r4,48(r6)
std r4,56(r6)
addi r6,r6,64
bdnz 4b
/* remaining 8-byte chunks (0-7 of them) */
5: srwi. r0,r5,3
clrlwi r5,r5,29
PPC_MTOCRF(1,r0)
beq 8f
bf 29,6f
std r4,0(r6)
std r4,8(r6)
std r4,16(r6)
std r4,24(r6)
addi r6,r6,32
6: bf 30,7f
std r4,0(r6)
std r4,8(r6)
addi r6,r6,16
7: bf 31,8f
std r4,0(r6)
addi r6,r6,8
/* final 0-7 bytes */
8: cmpwi r5,0
PPC_MTOCRF(1,r5)
beqlr
bf 29,9f
stw r4,0(r6)
addi r6,r6,4
9: bf 30,10f
sth r4,0(r6)
addi r6,r6,2
10: bflr 31
stb r4,0(r6)
blr
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL_KASAN(memset)
/*
 * memmove(dest, src, n): overlap-safe copy.  When dest > src a forward
 * copy could trample unread source bytes, so copy backwards; otherwise
 * plain memcpy is safe.
 */
_GLOBAL_TOC_KASAN(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
b memcpy
/*
 * backwards_memcpy(dest, src, n): copy n bytes descending from the end
 * of both buffers.  In: r3 = dest, r4 = src, r5 = byte count.
 * r6/r4 are advanced past the end and walked down with update-form
 * loads/stores; dest-word-alignment decides the fast word loop vs. the
 * byte loop at 5:.
 */
_GLOBAL(backwards_memcpy)
rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 = number of 8-byte chunks */
add r6,r3,r5
add r4,r4,r5
beq 2f
andi. r0,r6,3
mtctr r7
bne 5f /* dest end not word aligned: byte-copy to alignment first */
.balign 16
1: lwz r7,-4(r4)
lwzu r8,-8(r4)
stw r7,-4(r6)
stwu r8,-8(r6)
bdnz 1b
andi. r5,r5,7
2: cmplwi 0,r5,4
blt 3f
lwzu r0,-4(r4)
subi r5,r5,4
stwu r0,-4(r6)
3: cmpwi 0,r5,0
beqlr
mtctr r5
4: lbzu r0,-1(r4)
stbu r0,-1(r6)
bdnz 4b
blr
5: mtctr r0
6: lbzu r7,-1(r4)
stbu r7,-1(r6)
bdnz 6b
subf r5,r0,r5
rlwinm. r7,r5,32-3,3,31
beq 2b
mtctr r7
b 1b
EXPORT_SYMBOL(memmove)
EXPORT_SYMBOL_KASAN(memmove)
|
aixcc-public/challenge-001-exemplar-source
| 12,150
|
tools/testing/selftests/powerpc/copyloops/copyuser_power7.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* Copyright (C) IBM Corporation, 2011
*
* Author: Anton Blanchard <anton@au.ibm.com>
*/
#include <asm/ppc_asm.h>
#ifndef SELFTEST_CASE
/* 0 == don't use VMX, 1 == use VMX */
#define SELFTEST_CASE 0
#endif
#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
#else
#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
#endif
/*
 * Fault-handling macros for __copy_tofrom_user_power7: each errN tags
 * the instruction on its line with an exception-table entry routing a
 * fault to .Ldo_errN.
 *   err1 - no extra stack frame active
 *   err2 - inner register-save frame active (r14-r22 saved)
 *   err3 - VMX active, no extra frame (ALTIVEC only)
 *   err4 - VMX active, inner frame with r14-r16 saved (ALTIVEC only)
 */
.macro err1
100:
EX_TABLE(100b,.Ldo_err1)
.endm
.macro err2
200:
EX_TABLE(200b,.Ldo_err2)
.endm
#ifdef CONFIG_ALTIVEC
.macro err3
300:
EX_TABLE(300b,.Ldo_err3)
.endm
.macro err4
400:
EX_TABLE(400b,.Ldo_err4)
.endm
/* err4 fault: restore the VMX-loop scratch registers, then as err3 */
.Ldo_err4:
ld r16,STK_REG(R16)(r1)
ld r15,STK_REG(R15)(r1)
ld r14,STK_REG(R14)(r1)
/* err3 fault: leave VMX mode before unwinding */
.Ldo_err3:
bl exit_vmx_usercopy
ld r0,STACKFRAMESIZE+16(r1)
mtlr r0
b .Lexit
#endif /* CONFIG_ALTIVEC */
/* err2 fault: restore all callee-saved registers the copy loop used */
.Ldo_err2:
ld r22,STK_REG(R22)(r1)
ld r21,STK_REG(R21)(r1)
ld r20,STK_REG(R20)(r1)
ld r19,STK_REG(R19)(r1)
ld r18,STK_REG(R18)(r1)
ld r17,STK_REG(R17)(r1)
ld r16,STK_REG(R16)(r1)
ld r15,STK_REG(R15)(r1)
ld r14,STK_REG(R14)(r1)
.Lexit:
addi r1,r1,STACKFRAMESIZE
/* err1 fault: reload the original args saved at entry and retry the
 * whole copy via the base (non-VMX) implementation */
.Ldo_err1:
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
ld r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
ld r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
b __copy_tofrom_user_base
/*
 * __copy_tofrom_user_power7(to, from, n): POWER7-optimised user-space copy.
 * In: r3 = dest, r4 = src, r5 = byte count.
 * Returns 0 in r3 on full success on the non-VMX path; the VMX path
 * returns via exit_vmx_usercopy.  Faults are caught through the errN
 * exception tables above and fall back to __copy_tofrom_user_base.
 * Structure mirrors memcpy_power7 with errN annotations on every
 * user-memory access.
 */
_GLOBAL(__copy_tofrom_user_power7)
cmpldi r5,16
cmpldi cr1,r5,3328
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
blt .Lshort_copy
#ifdef CONFIG_ALTIVEC
test_feature = SELFTEST_CASE
BEGIN_FTR_SECTION
bgt cr1,.Lvmx_copy
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
.Lnonvmx_copy:
/* Get the source 8B aligned */
neg r6,r4
mtocrf 0x01,r6
clrldi r6,r6,(64-3)
bf cr7*4+3,1f
err1; lbz r0,0(r4)
addi r4,r4,1
err1; stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
err1; lhz r0,0(r4)
addi r4,r4,2
err1; sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
err1; lwz r0,0(r4)
addi r4,r4,4
err1; stw r0,0(r3)
addi r3,r3,4
3: sub r5,r5,r6
cmpldi r5,128
blt 5f
/* >= 128B left: open a frame and save r14-r22 for the unrolled loop */
mflr r0
stdu r1,-STACKFRAMESIZE(r1)
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
std r17,STK_REG(R17)(r1)
std r18,STK_REG(R18)(r1)
std r19,STK_REG(R19)(r1)
std r20,STK_REG(R20)(r1)
std r21,STK_REG(R21)(r1)
std r22,STK_REG(R22)(r1)
std r0,STACKFRAMESIZE+16(r1)
srdi r6,r5,7
mtctr r6
/* Now do cacheline (128B) sized loads and stores. */
.align 5
4:
err2; ld r0,0(r4)
err2; ld r6,8(r4)
err2; ld r7,16(r4)
err2; ld r8,24(r4)
err2; ld r9,32(r4)
err2; ld r10,40(r4)
err2; ld r11,48(r4)
err2; ld r12,56(r4)
err2; ld r14,64(r4)
err2; ld r15,72(r4)
err2; ld r16,80(r4)
err2; ld r17,88(r4)
err2; ld r18,96(r4)
err2; ld r19,104(r4)
err2; ld r20,112(r4)
err2; ld r21,120(r4)
addi r4,r4,128
err2; std r0,0(r3)
err2; std r6,8(r3)
err2; std r7,16(r3)
err2; std r8,24(r3)
err2; std r9,32(r3)
err2; std r10,40(r3)
err2; std r11,48(r3)
err2; std r12,56(r3)
err2; std r14,64(r3)
err2; std r15,72(r3)
err2; std r16,80(r3)
err2; std r17,88(r3)
err2; std r18,96(r3)
err2; std r19,104(r3)
err2; std r20,112(r3)
err2; std r21,120(r3)
addi r3,r3,128
bdnz 4b
clrldi r5,r5,(64-7)
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
ld r17,STK_REG(R17)(r1)
ld r18,STK_REG(R18)(r1)
ld r19,STK_REG(R19)(r1)
ld r20,STK_REG(R20)(r1)
ld r21,STK_REG(R21)(r1)
ld r22,STK_REG(R22)(r1)
addi r1,r1,STACKFRAMESIZE
/* Up to 127B to go */
5: srdi r6,r5,4
mtocrf 0x01,r6
6: bf cr7*4+1,7f
err1; ld r0,0(r4)
err1; ld r6,8(r4)
err1; ld r7,16(r4)
err1; ld r8,24(r4)
err1; ld r9,32(r4)
err1; ld r10,40(r4)
err1; ld r11,48(r4)
err1; ld r12,56(r4)
addi r4,r4,64
err1; std r0,0(r3)
err1; std r6,8(r3)
err1; std r7,16(r3)
err1; std r8,24(r3)
err1; std r9,32(r3)
err1; std r10,40(r3)
err1; std r11,48(r3)
err1; std r12,56(r3)
addi r3,r3,64
/* Up to 63B to go */
7: bf cr7*4+2,8f
err1; ld r0,0(r4)
err1; ld r6,8(r4)
err1; ld r7,16(r4)
err1; ld r8,24(r4)
addi r4,r4,32
err1; std r0,0(r3)
err1; std r6,8(r3)
err1; std r7,16(r3)
err1; std r8,24(r3)
addi r3,r3,32
/* Up to 31B to go */
8: bf cr7*4+3,9f
err1; ld r0,0(r4)
err1; ld r6,8(r4)
addi r4,r4,16
err1; std r0,0(r3)
err1; std r6,8(r3)
addi r3,r3,16
9: clrldi r5,r5,(64-4)
/* Up to 15B to go */
.Lshort_copy:
mtocrf 0x01,r5
bf cr7*4+0,12f
err1; lwz r0,0(r4) /* Less chance of a reject with word ops */
err1; lwz r6,4(r4)
addi r4,r4,8
err1; stw r0,0(r3)
err1; stw r6,4(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
err1; lwz r0,0(r4)
addi r4,r4,4
err1; stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
err1; lhz r0,0(r4)
addi r4,r4,2
err1; sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
err1; lbz r0,0(r4)
err1; stb r0,0(r3)
15: li r3,0 /* success: zero bytes left uncopied */
blr
.Lunwind_stack_nonvmx_copy:
addi r1,r1,STACKFRAMESIZE
b .Lnonvmx_copy
.Lvmx_copy:
#ifdef CONFIG_ALTIVEC
mflr r0
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl enter_vmx_usercopy
cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
ld r4,STK_REG(R30)(r1)
ld r5,STK_REG(R29)(r1)
mtlr r0
/*
 * We prefetch both the source and destination using enhanced touch
 * instructions. We use a stream ID of 0 for the load side and
 * 1 for the store side.
 */
clrrdi r6,r4,7
clrrdi r9,r3,7
ori r9,r9,1 /* stream=1 */
srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
cmpldi r7,0x3FF
ble 1f
li r7,0x3FF
1: lis r0,0x0E00 /* depth=7 */
sldi r7,r7,7
or r7,r7,r0
ori r10,r7,1 /* stream=1 */
lis r8,0x8000 /* GO=1 */
clrldi r8,r8,32
/* setup read stream 0 */
dcbt 0,r6,0b01000 /* addr from */
dcbt 0,r7,0b01010 /* length and depth from */
/* setup write stream 1 */
dcbtst 0,r9,0b01000 /* addr to */
dcbtst 0,r10,0b01010 /* length and depth to */
eieio
dcbt 0,r8,0b01010 /* all streams GO */
beq cr1,.Lunwind_stack_nonvmx_copy
/*
 * If source and destination are not relatively aligned we use a
 * slower permute loop.
 */
xor r6,r4,r3
rldicl. r6,r6,0,(64-4)
bne .Lvmx_unaligned_copy
/* Get the destination 16B aligned */
neg r6,r3
mtocrf 0x01,r6
clrldi r6,r6,(64-4)
bf cr7*4+3,1f
err3; lbz r0,0(r4)
addi r4,r4,1
err3; stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
err3; lhz r0,0(r4)
addi r4,r4,2
err3; sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
err3; lwz r0,0(r4)
addi r4,r4,4
err3; stw r0,0(r3)
addi r3,r3,4
3: bf cr7*4+0,4f
err3; ld r0,0(r4)
addi r4,r4,8
err3; std r0,0(r3)
addi r3,r3,8
4: sub r5,r5,r6
/* Get the destination 128B aligned */
neg r6,r3
srdi r7,r6,4
mtocrf 0x01,r7
clrldi r6,r6,(64-7)
li r9,16
li r10,32
li r11,48
bf cr7*4+3,5f
err3; lvx v1,0,r4
addi r4,r4,16
err3; stvx v1,0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
err3; lvx v1,0,r4
err3; lvx v0,r4,r9
addi r4,r4,32
err3; stvx v1,0,r3
err3; stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
err3; lvx v3,0,r4
err3; lvx v2,r4,r9
err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
addi r4,r4,64
err3; stvx v3,0,r3
err3; stvx v2,r3,r9
err3; stvx v1,r3,r10
err3; stvx v0,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
srdi r6,r5,7
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
li r12,64
li r14,80
li r15,96
li r16,112
mtctr r6
/*
 * Now do cacheline sized loads and stores. By this stage the
 * cacheline stores are also cacheline aligned.
 */
.align 5
8:
err4; lvx v7,0,r4
err4; lvx v6,r4,r9
err4; lvx v5,r4,r10
err4; lvx v4,r4,r11
err4; lvx v3,r4,r12
err4; lvx v2,r4,r14
err4; lvx v1,r4,r15
err4; lvx v0,r4,r16
addi r4,r4,128
err4; stvx v7,0,r3
err4; stvx v6,r3,r9
err4; stvx v5,r3,r10
err4; stvx v4,r3,r11
err4; stvx v3,r3,r12
err4; stvx v2,r3,r14
err4; stvx v1,r3,r15
err4; stvx v0,r3,r16
addi r3,r3,128
bdnz 8b
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
/* Up to 127B to go */
clrldi r5,r5,(64-7)
srdi r6,r5,4
mtocrf 0x01,r6
bf cr7*4+1,9f
err3; lvx v3,0,r4
err3; lvx v2,r4,r9
err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
addi r4,r4,64
err3; stvx v3,0,r3
err3; stvx v2,r3,r9
err3; stvx v1,r3,r10
err3; stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
err3; lvx v1,0,r4
err3; lvx v0,r4,r9
addi r4,r4,32
err3; stvx v1,0,r3
err3; stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
err3; lvx v1,0,r4
addi r4,r4,16
err3; stvx v1,0,r3
addi r3,r3,16
/* Up to 15B to go */
11: clrldi r5,r5,(64-4)
mtocrf 0x01,r5
bf cr7*4+0,12f
err3; ld r0,0(r4)
addi r4,r4,8
err3; std r0,0(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
err3; lwz r0,0(r4)
addi r4,r4,4
err3; stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
err3; lhz r0,0(r4)
addi r4,r4,2
err3; sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
err3; lbz r0,0(r4)
err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
b exit_vmx_usercopy /* tail call optimise */
/*
 * Source and destination differ in their 16B alignment: copy 16B blocks
 * from aligned addresses and stitch the result with vperm through the
 * control vector set up by LVS below.
 */
.Lvmx_unaligned_copy:
/* Get the destination 16B aligned */
neg r6,r3
mtocrf 0x01,r6
clrldi r6,r6,(64-4)
bf cr7*4+3,1f
err3; lbz r0,0(r4)
addi r4,r4,1
err3; stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
err3; lhz r0,0(r4)
addi r4,r4,2
err3; sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
err3; lwz r0,0(r4)
addi r4,r4,4
err3; stw r0,0(r3)
addi r3,r3,4
3: bf cr7*4+0,4f
err3; lwz r0,0(r4) /* Less chance of a reject with word ops */
err3; lwz r7,4(r4)
addi r4,r4,8
err3; stw r0,0(r3)
err3; stw r7,4(r3)
addi r3,r3,8
4: sub r5,r5,r6
/* Get the destination 128B aligned */
neg r6,r3
srdi r7,r6,4
mtocrf 0x01,r7
clrldi r6,r6,(64-7)
li r9,16
li r10,32
li r11,48
LVS(v16,0,r4) /* Setup permute control vector */
err3; lvx v0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
err3; stvx v8,0,r3
addi r3,r3,16
vor v0,v1,v1
5: bf cr7*4+2,6f
err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
err3; lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
err3; stvx v8,0,r3
err3; stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
err3; lvx v3,0,r4
VPERM(v8,v0,v3,v16)
err3; lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
err3; lvx v1,r4,r10
VPERM(v10,v2,v1,v16)
err3; lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
err3; stvx v8,0,r3
err3; stvx v9,r3,r9
err3; stvx v10,r3,r10
err3; stvx v11,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
srdi r6,r5,7
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
li r12,64
li r14,80
li r15,96
li r16,112
mtctr r6
/*
 * Now do cacheline sized loads and stores. By this stage the
 * cacheline stores are also cacheline aligned.
 */
.align 5
8:
err4; lvx v7,0,r4
VPERM(v8,v0,v7,v16)
err4; lvx v6,r4,r9
VPERM(v9,v7,v6,v16)
err4; lvx v5,r4,r10
VPERM(v10,v6,v5,v16)
err4; lvx v4,r4,r11
VPERM(v11,v5,v4,v16)
err4; lvx v3,r4,r12
VPERM(v12,v4,v3,v16)
err4; lvx v2,r4,r14
VPERM(v13,v3,v2,v16)
err4; lvx v1,r4,r15
VPERM(v14,v2,v1,v16)
err4; lvx v0,r4,r16
VPERM(v15,v1,v0,v16)
addi r4,r4,128
err4; stvx v8,0,r3
err4; stvx v9,r3,r9
err4; stvx v10,r3,r10
err4; stvx v11,r3,r11
err4; stvx v12,r3,r12
err4; stvx v13,r3,r14
err4; stvx v14,r3,r15
err4; stvx v15,r3,r16
addi r3,r3,128
bdnz 8b
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
/* Up to 127B to go */
clrldi r5,r5,(64-7)
srdi r6,r5,4
mtocrf 0x01,r6
bf cr7*4+1,9f
err3; lvx v3,0,r4
VPERM(v8,v0,v3,v16)
err3; lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
err3; lvx v1,r4,r10
VPERM(v10,v2,v1,v16)
err3; lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
err3; stvx v8,0,r3
err3; stvx v9,r3,r9
err3; stvx v10,r3,r10
err3; stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
err3; lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
err3; stvx v8,0,r3
err3; stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
err3; stvx v8,0,r3
addi r3,r3,16
/* Up to 15B to go */
11: clrldi r5,r5,(64-4)
addi r4,r4,-16 /* Unwind the +16 load offset */
mtocrf 0x01,r5
bf cr7*4+0,12f
err3; lwz r0,0(r4) /* Less chance of a reject with word ops */
err3; lwz r6,4(r4)
addi r4,r4,8
err3; stw r0,0(r3)
err3; stw r6,4(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
err3; lwz r0,0(r4)
addi r4,r4,4
err3; stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
err3; lhz r0,0(r4)
addi r4,r4,2
err3; sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
err3; lbz r0,0(r4)
err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
b exit_vmx_usercopy /* tail call optimise */
#endif /* CONFIG_ALTIVEC */
|
aixcc-public/challenge-001-exemplar-source
| 11,151
|
tools/testing/selftests/powerpc/copyloops/copyuser_64.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#ifndef SELFTEST_CASE
/* 0 == most CPUs, 1 == POWER6, 2 == Cell */
#define SELFTEST_CASE 0
#endif
#ifdef __BIG_ENDIAN__
#define sLd sld /* Shift towards low-numbered address. */
#define sHd srd /* Shift towards high-numbered address. */
#else
#define sLd srd /* Shift towards low-numbered address. */
#define sHd sld /* Shift towards high-numbered address. */
#endif
/*
* These macros are used to generate exception table entries.
* The exception handlers below use the original arguments
* (stored on the stack) and the point where we're up to in
* the destination buffer, i.e. the address of the first
* unmodified byte. Generally r3 points into the destination
* buffer, but the first unmodified byte is at a variable
* offset from r3. In the code below, the symbol r3_offset
* is set to indicate the current offset at each point in
* the code. This offset is then used as a negative offset
* from the exception handler code, and those instructions
* before the exception handlers are addi instructions that
* adjust r3 to point to the correct place.
*/
/* Each lex/stex tags the instruction on its line with an exception-table
 * entry; the handler address is biased by r3_offset (see comment above)
 * so the fixup code knows how far the destination pointer has advanced. */
.macro lex /* exception handler for load */
100: EX_TABLE(100b, .Lld_exc - r3_offset)
.endm
.macro stex /* exception handler for store */
100: EX_TABLE(100b, .Lst_exc - r3_offset)
.endm
.align 7
/*
 * __copy_tofrom_user entry point: on Book3S-64 CPUs with CPU_FTR_VMX_COPY
 * the feature section patches in a branch to the POWER7 variant; otherwise
 * the nop is used and control falls through to __copy_tofrom_user_base.
 */
_GLOBAL_TOC(__copy_tofrom_user)
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
b __copy_tofrom_user_power7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
#endif
_GLOBAL(__copy_tofrom_user_base)
/* first check for a 4kB copy on a 4kB boundary */
cmpldi cr1,r5,16
cmpdi cr6,r5,4096
or r0,r3,r4
neg r6,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
andi. r0,r0,4095
std r3,-24(r1)
crand cr0*4+2,cr0*4+2,cr6*4+2
std r4,-16(r1)
std r5,-8(r1)
dcbt 0,r4
beq .Lcopy_page_4K
andi. r6,r6,7
PPC_MTOCRF(0x01,r5)
blt cr1,.Lshort_copy
/* Below we want to nop out the bne if we're on a CPU that has the
* CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
* cleared.
* At the time of writing the only CPU that has this combination of bits
* set is Power6.
*/
test_feature = (SELFTEST_CASE == 1)
BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
bne .Ldst_unaligned
ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
addi r3,r3,-16
r3_offset = 16
test_feature = (SELFTEST_CASE == 0)
BEGIN_FTR_SECTION
andi. r0,r4,7
bne .Lsrc_unaligned
END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blt cr1,.Ldo_tail /* if < 16 bytes to copy */
srdi r0,r5,5
cmpdi cr1,r0,0
lex; ld r7,0(r4)
lex; ld r6,8(r4)
addi r4,r4,16
mtctr r0
andi. r0,r5,0x10
beq 22f
addi r3,r3,16
r3_offset = 0
addi r4,r4,-16
mr r9,r7
mr r8,r6
beq cr1,72f
21:
lex; ld r7,16(r4)
lex; ld r6,24(r4)
addi r4,r4,32
stex; std r9,0(r3)
r3_offset = 8
stex; std r8,8(r3)
r3_offset = 16
22:
lex; ld r9,0(r4)
lex; ld r8,8(r4)
stex; std r7,16(r3)
r3_offset = 24
stex; std r6,24(r3)
addi r3,r3,32
r3_offset = 0
bdnz 21b
72:
stex; std r9,0(r3)
r3_offset = 8
stex; std r8,8(r3)
r3_offset = 16
andi. r5,r5,0xf
beq+ 3f
addi r4,r4,16
.Ldo_tail:
addi r3,r3,16
r3_offset = 0
bf cr7*4+0,246f
lex; ld r9,0(r4)
addi r4,r4,8
stex; std r9,0(r3)
addi r3,r3,8
246: bf cr7*4+1,1f
lex; lwz r9,0(r4)
addi r4,r4,4
stex; stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
lex; lhz r9,0(r4)
addi r4,r4,2
stex; sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
lex; lbz r9,0(r4)
stex; stb r9,0(r3)
3: li r3,0
blr
.Lsrc_unaligned:
r3_offset = 16
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
srdi r7,r5,4
sldi r10,r0,3
cmpldi cr6,r6,3
andi. r5,r5,7
mtctr r7
subfic r11,r10,64
add r5,r5,r0
bt cr7*4+0,28f
lex; ld r9,0(r4) /* 3+2n loads, 2+2n stores */
lex; ld r0,8(r4)
sLd r6,r9,r10
lex; ldu r9,16(r4)
sHd r7,r0,r11
sLd r8,r0,r10
or r7,r7,r6
blt cr6,79f
lex; ld r0,8(r4)
b 2f
28:
lex; ld r0,0(r4) /* 4+2n loads, 3+2n stores */
lex; ldu r9,8(r4)
sLd r8,r0,r10
addi r3,r3,-8
r3_offset = 24
blt cr6,5f
lex; ld r0,8(r4)
sHd r12,r9,r11
sLd r6,r9,r10
lex; ldu r9,16(r4)
or r12,r8,r12
sHd r7,r0,r11
sLd r8,r0,r10
addi r3,r3,16
r3_offset = 8
beq cr6,78f
1: or r7,r7,r6
lex; ld r0,8(r4)
stex; std r12,8(r3)
r3_offset = 16
2: sHd r12,r9,r11
sLd r6,r9,r10
lex; ldu r9,16(r4)
or r12,r8,r12
stex; stdu r7,16(r3)
r3_offset = 8
sHd r7,r0,r11
sLd r8,r0,r10
bdnz 1b
78:
stex; std r12,8(r3)
r3_offset = 16
or r7,r7,r6
79:
stex; std r7,16(r3)
r3_offset = 24
5: sHd r12,r9,r11
or r12,r8,r12
stex; std r12,24(r3)
r3_offset = 32
bne 6f
li r3,0
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
r3_offset = 0
sLd r9,r9,r10
ble cr1,7f
lex; ld r0,8(r4)
sHd r7,r0,r11
or r9,r7,r9
7:
bf cr7*4+1,1f
#ifdef __BIG_ENDIAN__
rotldi r9,r9,32
#endif
stex; stw r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,32
#endif
addi r3,r3,4
1: bf cr7*4+2,2f
#ifdef __BIG_ENDIAN__
rotldi r9,r9,16
#endif
stex; sth r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,16
#endif
addi r3,r3,2
2: bf cr7*4+3,3f
#ifdef __BIG_ENDIAN__
rotldi r9,r9,8
#endif
stex; stb r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,8
#endif
3: li r3,0
blr
.Ldst_unaligned:
r3_offset = 0
PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
bf cr7*4+3,1f
100: EX_TABLE(100b, .Lld_exc_r7)
lbz r0,0(r4)
100: EX_TABLE(100b, .Lst_exc_r7)
stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
100: EX_TABLE(100b, .Lld_exc_r7)
lhzx r0,r7,r4
100: EX_TABLE(100b, .Lst_exc_r7)
sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
100: EX_TABLE(100b, .Lld_exc_r7)
lwzx r0,r7,r4
100: EX_TABLE(100b, .Lst_exc_r7)
stwx r0,r7,r3
3: PPC_MTOCRF(0x01,r5)
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
r3_offset = 0
bf cr7*4+0,1f
lex; lwz r0,0(r4)
lex; lwz r9,4(r4)
addi r4,r4,8
stex; stw r0,0(r3)
stex; stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
lex; lwz r0,0(r4)
addi r4,r4,4
stex; stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
lex; lhz r0,0(r4)
addi r4,r4,2
stex; sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
lex; lbz r0,0(r4)
stex; stb r0,0(r3)
4: li r3,0
blr
/*
* exception handlers follow
* we have to return the number of bytes not copied
* for an exception on a load, we set the rest of the destination to 0
* Note that the number of bytes of instructions for adjusting r3 needs
* to equal the amount of the adjustment, due to the trick of using
* .Lld_exc - r3_offset as the handler address.
*/
.Lld_exc_r7:
add r3,r3,r7
b .Lld_exc
/* adjust by 24 */
addi r3,r3,8
nop
/* adjust by 16 */
addi r3,r3,8
nop
/* adjust by 8 */
addi r3,r3,8
nop
/*
* Here we have had a fault on a load and r3 points to the first
* unmodified byte of the destination. We use the original arguments
* and r3 to work out how much wasn't copied. Since we load some
* distance ahead of the stores, we continue copying byte-by-byte until
* we hit the load fault again in order to copy as much as possible.
*/
.Lld_exc:
ld r6,-24(r1)
ld r4,-16(r1)
ld r5,-8(r1)
subf r6,r6,r3
add r4,r4,r6
subf r5,r6,r5 /* #bytes left to go */
/*
* first see if we can copy any more bytes before hitting another exception
*/
mtctr r5
r3_offset = 0
100: EX_TABLE(100b, .Ldone)
43: lbz r0,0(r4)
addi r4,r4,1
stex; stb r0,0(r3)
addi r3,r3,1
bdnz 43b
li r3,0 /* huh? all copied successfully this time? */
blr
/*
* here we have trapped again, amount remaining is in ctr.
*/
.Ldone:
mfctr r3
blr
/*
* exception handlers for stores: we need to work out how many bytes
* weren't copied, and we may need to copy some more.
* Note that the number of bytes of instructions for adjusting r3 needs
* to equal the amount of the adjustment, due to the trick of using
* .Lst_exc - r3_offset as the handler address.
*/
.Lst_exc_r7:
add r3,r3,r7
b .Lst_exc
/* adjust by 24 */
addi r3,r3,8
nop
/* adjust by 16 */
addi r3,r3,8
nop
/* adjust by 8 */
addi r3,r3,4
/* adjust by 4 */
addi r3,r3,4
.Lst_exc:
ld r6,-24(r1) /* original destination pointer */
ld r4,-16(r1) /* original source pointer */
ld r5,-8(r1) /* original number of bytes */
add r7,r6,r5
/*
* If the destination pointer isn't 8-byte aligned,
* we may have got the exception as a result of a
* store that overlapped a page boundary, so we may be
* able to copy a few more bytes.
*/
17: andi. r0,r3,7
beq 19f
subf r8,r6,r3 /* #bytes copied */
100: EX_TABLE(100b,19f)
lbzx r0,r8,r4
100: EX_TABLE(100b,19f)
stb r0,0(r3)
addi r3,r3,1
cmpld r3,r7
blt 17b
19: subf r3,r3,r7 /* #bytes not copied in r3 */
blr
/*
* Routine to copy a whole page of data, optimized for POWER4.
* On POWER4 it is more than 50% faster than the simple loop
* above (following the .Ldst_aligned label).
*/
.macro exc
100: EX_TABLE(100b, .Labort)
.endm
.Lcopy_page_4K:
std r31,-32(1)
std r30,-40(1)
std r29,-48(1)
std r28,-56(1)
std r27,-64(1)
std r26,-72(1)
std r25,-80(1)
std r24,-88(1)
std r23,-96(1)
std r22,-104(1)
std r21,-112(1)
std r20,-120(1)
li r5,4096/32 - 1
addi r3,r3,-8
li r0,5
0: addi r5,r5,-24
mtctr r0
exc; ld r22,640(4)
exc; ld r21,512(4)
exc; ld r20,384(4)
exc; ld r11,256(4)
exc; ld r9,128(4)
exc; ld r7,0(4)
exc; ld r25,648(4)
exc; ld r24,520(4)
exc; ld r23,392(4)
exc; ld r10,264(4)
exc; ld r8,136(4)
exc; ldu r6,8(4)
cmpwi r5,24
1:
exc; std r22,648(3)
exc; std r21,520(3)
exc; std r20,392(3)
exc; std r11,264(3)
exc; std r9,136(3)
exc; std r7,8(3)
exc; ld r28,648(4)
exc; ld r27,520(4)
exc; ld r26,392(4)
exc; ld r31,264(4)
exc; ld r30,136(4)
exc; ld r29,8(4)
exc; std r25,656(3)
exc; std r24,528(3)
exc; std r23,400(3)
exc; std r10,272(3)
exc; std r8,144(3)
exc; std r6,16(3)
exc; ld r22,656(4)
exc; ld r21,528(4)
exc; ld r20,400(4)
exc; ld r11,272(4)
exc; ld r9,144(4)
exc; ld r7,16(4)
exc; std r28,664(3)
exc; std r27,536(3)
exc; std r26,408(3)
exc; std r31,280(3)
exc; std r30,152(3)
exc; stdu r29,24(3)
exc; ld r25,664(4)
exc; ld r24,536(4)
exc; ld r23,408(4)
exc; ld r10,280(4)
exc; ld r8,152(4)
exc; ldu r6,24(4)
bdnz 1b
exc; std r22,648(3)
exc; std r21,520(3)
exc; std r20,392(3)
exc; std r11,264(3)
exc; std r9,136(3)
exc; std r7,8(3)
addi r4,r4,640
addi r3,r3,648
bge 0b
mtctr r5
exc; ld r7,0(4)
exc; ld r8,8(4)
exc; ldu r9,16(4)
3:
exc; ld r10,8(4)
exc; std r7,8(3)
exc; ld r7,16(4)
exc; std r8,16(3)
exc; ld r8,24(4)
exc; std r9,24(3)
exc; ldu r9,32(4)
exc; stdu r10,32(3)
bdnz 3b
4:
exc; ld r10,8(4)
exc; std r7,8(3)
exc; std r8,16(3)
exc; std r9,24(3)
exc; std r10,32(3)
9: ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
ld r23,-96(1)
ld r24,-88(1)
ld r25,-80(1)
ld r26,-72(1)
ld r27,-64(1)
ld r28,-56(1)
ld r29,-48(1)
ld r30,-40(1)
ld r31,-32(1)
li r3,0
blr
/*
* on an exception, reset to the beginning and jump back into the
* standard __copy_tofrom_user
*/
.Labort:
ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
ld r23,-96(1)
ld r24,-88(1)
ld r25,-80(1)
ld r26,-72(1)
ld r27,-64(1)
ld r28,-56(1)
ld r29,-48(1)
ld r30,-40(1)
ld r31,-32(1)
ld r3,-24(r1)
ld r4,-16(r1)
li r5,4096
b .Ldst_aligned
EXPORT_SYMBOL(__copy_tofrom_user)
|
aixcc-public/challenge-001-exemplar-source
| 4,168
|
tools/testing/selftests/powerpc/copyloops/copy_mc_64.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) IBM Corporation, 2011
* Derived from copyuser_power7.s by Anton Blanchard <anton@au.ibm.com>
* Author - Balbir Singh <bsingharora@gmail.com>
*/
#include <asm/ppc_asm.h>
#include <asm/errno.h>
#include <asm/export.h>
/* Fault handlers for the three copy phases: err1 covers code run with
 * the original stack, err2 covers the cacheline loop (stack frame with
 * r14-r22 saved), err3 covers the final byte-by-byte recovery loop. */
.macro err1
100:
	EX_TABLE(100b,.Ldo_err1)
.endm
.macro err2
200:
	EX_TABLE(200b,.Ldo_err2)
.endm
.macro err3
300: EX_TABLE(300b,.Ldone)
.endm
.Ldo_err2:
	/* fault inside the big loop: restore NV regs and pop the frame */
	ld r22,STK_REG(R22)(r1)
	ld r21,STK_REG(R21)(r1)
	ld r20,STK_REG(R20)(r1)
	ld r19,STK_REG(R19)(r1)
	ld r18,STK_REG(R18)(r1)
	ld r17,STK_REG(R17)(r1)
	ld r16,STK_REG(R16)(r1)
	ld r15,STK_REG(R15)(r1)
	ld r14,STK_REG(R14)(r1)
	addi r1,r1,STACKFRAMESIZE
.Ldo_err1:
	/* Do a byte by byte copy to get the exact remaining size */
	mtctr r7		/* r7 tracks bytes not yet copied */
46:
err3; lbz r0,0(r4)
	addi r4,r4,1
err3; stb r0,0(r3)
	addi r3,r3,1
	bdnz 46b
	li r3,0
	blr
.Ldone:
	/* second fault: CTR holds the number of bytes not copied */
	mfctr r3
	blr
/*
 * unsigned long copy_mc_generic(void *to, const void *from, unsigned long n)
 * In:  r3 = to, r4 = from, r5 = n.
 * Out: r3 = 0 on success, else number of bytes not copied (via the
 *      err1/err2/err3 fault handlers above).
 * r7 mirrors the remaining byte count throughout (the subi r7,... after
 * each chunk) so the handlers can report it.
 */
_GLOBAL(copy_mc_generic)
	mr r7,r5
	cmpldi r5,16
	blt .Lshort_copy
.Lcopy:
	/* Get the source 8B aligned */
	neg r6,r4
	mtocrf 0x01,r6		/* cr7 bits select 1/2/4-byte head copies */
	clrldi r6,r6,(64-3)
	bf cr7*4+3,1f
err1; lbz r0,0(r4)
	addi r4,r4,1
err1; stb r0,0(r3)
	addi r3,r3,1
	subi r7,r7,1
1: bf cr7*4+2,2f
err1; lhz r0,0(r4)
	addi r4,r4,2
err1; sth r0,0(r3)
	addi r3,r3,2
	subi r7,r7,2
2: bf cr7*4+1,3f
err1; lwz r0,0(r4)
	addi r4,r4,4
err1; stw r0,0(r3)
	addi r3,r3,4
	subi r7,r7,4
3: sub r5,r5,r6
	cmpldi r5,128
	/* set up a frame and save r14-r22 for the unrolled loop */
	mflr r0
	stdu r1,-STACKFRAMESIZE(r1)
	std r14,STK_REG(R14)(r1)
	std r15,STK_REG(R15)(r1)
	std r16,STK_REG(R16)(r1)
	std r17,STK_REG(R17)(r1)
	std r18,STK_REG(R18)(r1)
	std r19,STK_REG(R19)(r1)
	std r20,STK_REG(R20)(r1)
	std r21,STK_REG(R21)(r1)
	std r22,STK_REG(R22)(r1)
	std r0,STACKFRAMESIZE+16(r1)
	blt 5f
	srdi r6,r5,7
	mtctr r6
	/* Now do cacheline (128B) sized loads and stores. */
	.align 5
4:
err2; ld r0,0(r4)
err2; ld r6,8(r4)
err2; ld r8,16(r4)
err2; ld r9,24(r4)
err2; ld r10,32(r4)
err2; ld r11,40(r4)
err2; ld r12,48(r4)
err2; ld r14,56(r4)
err2; ld r15,64(r4)
err2; ld r16,72(r4)
err2; ld r17,80(r4)
err2; ld r18,88(r4)
err2; ld r19,96(r4)
err2; ld r20,104(r4)
err2; ld r21,112(r4)
err2; ld r22,120(r4)
	addi r4,r4,128
err2; std r0,0(r3)
err2; std r6,8(r3)
err2; std r8,16(r3)
err2; std r9,24(r3)
err2; std r10,32(r3)
err2; std r11,40(r3)
err2; std r12,48(r3)
err2; std r14,56(r3)
err2; std r15,64(r3)
err2; std r16,72(r3)
err2; std r17,80(r3)
err2; std r18,88(r3)
err2; std r19,96(r3)
err2; std r20,104(r3)
err2; std r21,112(r3)
err2; std r22,120(r3)
	addi r3,r3,128
	subi r7,r7,128
	bdnz 4b
	clrldi r5,r5,(64-7)
/* Up to 127B to go */
5: srdi r6,r5,4
	mtocrf 0x01,r6		/* cr7 bits select 64/32/16-byte chunks */
6: bf cr7*4+1,7f
err2; ld r0,0(r4)
err2; ld r6,8(r4)
err2; ld r8,16(r4)
err2; ld r9,24(r4)
err2; ld r10,32(r4)
err2; ld r11,40(r4)
err2; ld r12,48(r4)
err2; ld r14,56(r4)
	addi r4,r4,64
err2; std r0,0(r3)
err2; std r6,8(r3)
err2; std r8,16(r3)
err2; std r9,24(r3)
err2; std r10,32(r3)
err2; std r11,40(r3)
err2; std r12,48(r3)
err2; std r14,56(r3)
	addi r3,r3,64
	subi r7,r7,64
7: ld r14,STK_REG(R14)(r1)
	ld r15,STK_REG(R15)(r1)
	ld r16,STK_REG(R16)(r1)
	ld r17,STK_REG(R17)(r1)
	ld r18,STK_REG(R18)(r1)
	ld r19,STK_REG(R19)(r1)
	ld r20,STK_REG(R20)(r1)
	ld r21,STK_REG(R21)(r1)
	ld r22,STK_REG(R22)(r1)
	addi r1,r1,STACKFRAMESIZE
/* Up to 63B to go */
	bf cr7*4+2,8f
err1; ld r0,0(r4)
err1; ld r6,8(r4)
err1; ld r8,16(r4)
err1; ld r9,24(r4)
	addi r4,r4,32
err1; std r0,0(r3)
err1; std r6,8(r3)
err1; std r8,16(r3)
err1; std r9,24(r3)
	addi r3,r3,32
	subi r7,r7,32
/* Up to 31B to go */
8: bf cr7*4+3,9f
err1; ld r0,0(r4)
err1; ld r6,8(r4)
	addi r4,r4,16
err1; std r0,0(r3)
err1; std r6,8(r3)
	addi r3,r3,16
	subi r7,r7,16
9: clrldi r5,r5,(64-4)
/* Up to 15B to go */
.Lshort_copy:
	mtocrf 0x01,r5		/* cr7 bits select 8/4/2/1-byte tails */
	bf cr7*4+0,12f
err1; lwz r0,0(r4) /* Less chance of a reject with word ops */
err1; lwz r6,4(r4)
	addi r4,r4,8
err1; stw r0,0(r3)
err1; stw r6,4(r3)
	addi r3,r3,8
	subi r7,r7,8
12: bf cr7*4+1,13f
err1; lwz r0,0(r4)
	addi r4,r4,4
err1; stw r0,0(r3)
	addi r3,r3,4
	subi r7,r7,4
13: bf cr7*4+2,14f
err1; lhz r0,0(r4)
	addi r4,r4,2
err1; sth r0,0(r3)
	addi r3,r3,2
	subi r7,r7,2
14: bf cr7*4+3,15f
err1; lbz r0,0(r4)
err1; stb r0,0(r3)
15: li r3,0
	blr
EXPORT_SYMBOL_GPL(copy_mc_generic);
|
aixcc-public/challenge-001-exemplar-source
| 2,691
|
tools/testing/selftests/powerpc/stringloops/strlen_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* strlen() for PPC32
*
* Copyright (C) 2018 Christophe Leroy CS Systemes d'Information.
*
* Inspired from glibc implementation
*/
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/cache.h>
.text
/*
* Algorithm:
*
* 1) Given a word 'x', we can test to see if it contains any 0 bytes
* by subtracting 0x01010101, and seeing if any of the high bits of each
* byte changed from 0 to 1. This works because the least significant
* 0 byte must have had no incoming carry (otherwise it's not the least
* significant), so it is 0x00 - 0x01 == 0xff. For all other
* byte values, either they have the high bit set initially, or when
* 1 is subtracted you get a value in the range 0x00-0x7f, none of which
* have their high bit set. The expression here is
* (x - 0x01010101) & ~x & 0x80808080), which gives 0x00000000 when
* there were no 0x00 bytes in the word. You get 0x80 in bytes that
* match, but possibly false 0x80 matches in the next more significant
* byte to a true match due to carries. For little-endian this is
* of no consequence since the least significant match is the one
* we're interested in, but big-endian needs method 2 to find which
* byte matches.
* 2) Given a word 'x', we can test to see _which_ byte was zero by
* calculating ~(((x & ~0x80808080) - 0x80808080 - 1) | x | ~0x80808080).
* This produces 0x80 in each byte that was zero, and 0x00 in all
* the other bytes. The '| ~0x80808080' clears the low 7 bits in each
* byte, and the '| x' part ensures that bytes with the high bit set
* produce 0x00. The addition will carry into the high bit of each byte
* iff that byte had one of its low 7 bits set. We can then just see
* which was the most significant bit set and divide by 8 to find how
* many to add to the index.
* This is from the book 'The PowerPC Compiler Writer's Guide',
* by Steve Hoxey, Faraydon Karim, Bill Hay and Hank Warren.
*/
/*
 * size_t strlen(const char *s) for PPC32.
 * In:  r3 = s.  Out: r3 = length.  Clobbers r0, r6-r10, cr0.
 * Uses the (x - 0x01010101) & ~x & 0x80808080 zero-byte test described
 * in the comment block above; method 2 (the subfe/nor sequence) locates
 * the matching byte once a candidate word is found.
 */
_GLOBAL(strlen)
	andi. r0, r3, 3		/* r0 = s & 3: word-alignment of the string */
	lis r7, 0x0101
	addi r10, r3, -4	/* r10 = s - 4, pre-biased for lwzu below */
	addic r7, r7, 0x0101 /* r7 = 0x01010101 (lomagic) & clear XER[CA] */
	rotlwi r6, r7, 31 /* r6 = 0x80808080 (himagic) */
	bne- 3f			/* unaligned start: mask leading bytes first */
	.balign IFETCH_ALIGN_BYTES
1: lwzu r9, 4(r10)		/* fetch next word of the string */
2: subf r8, r7, r9
	and. r8, r8, r9
	beq+ 1b			/* no byte had its high bit flip: no zero here */
	andc. r8, r8, r9
	beq+ 1b			/* candidate was a false match (high bit was set) */
	/* A zero byte exists in r9: find which one (method 2 from above). */
	andc r8, r9, r6		/* r8 = x & ~himagic */
	orc r9, r9, r6		/* r9 = x | ~himagic */
	subfe r8, r6, r8	/* subtract himagic + 1 using carry from addic */
	nor r8, r8, r9		/* 0x80 in exactly the zero bytes */
	cntlzw r8, r8		/* bit index of most significant match */
	subf r3, r3, r10	/* whole words scanned = r10 - s */
	srwi r8, r8, 3		/* bits -> bytes */
	add r3, r3, r8
	blr
/* Misaligned string: make sure the bytes before the string are not seen as 0 */
3: xor r10, r10, r0	/* round r10 down to a word boundary */
	orc r8, r8, r8		/* r8 = all ones */
	lwzu r9, 4(r10)
	slwi r0, r0, 3		/* leading byte count -> bit count */
	srw r8, r8, r0
	orc r9, r9, r8		/* force the out-of-string leading bytes non-zero */
	b 2b
EXPORT_SYMBOL(strlen)
|
aixcc-public/challenge-001-exemplar-source
| 11,692
|
tools/testing/selftests/powerpc/stringloops/memcmp_64.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Author: Anton Blanchard <anton@au.ibm.com>
* Copyright 2015 IBM Corporation.
*/
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/ppc-opcode.h>
#define off8 r6
#define off16 r7
#define off24 r8
#define rA r9
#define rB r10
#define rC r11
#define rD r27
#define rE r28
#define rF r29
#define rG r30
#define rH r31
#ifdef __LITTLE_ENDIAN__
#define LH lhbrx
#define LW lwbrx
#define LD ldbrx
#define LVS lvsr
#define VPERM(_VRT,_VRA,_VRB,_VRC) \
vperm _VRT,_VRB,_VRA,_VRC
#else
#define LH lhzx
#define LW lwzx
#define LD ldx
#define LVS lvsl
#define VPERM(_VRT,_VRA,_VRB,_VRC) \
vperm _VRT,_VRA,_VRB,_VRC
#endif
/* Minimum length before the VMX (Altivec) compare paths are worth the
 * vector save/restore cost. */
#define VMX_THRESH 4096
/* Call enter_vmx_ops, preserving r3-r5 (memcmp's args) and LR in the
 * caller's frame.  On return cr1 reflects enter_vmx_ops's r3 (0 => VMX
 * unavailable; callers branch to the non-VMX path on eq). */
#define ENTER_VMX_OPS \
mflr r0; \
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
std r0,16(r1); \
stdu r1,-STACKFRAMESIZE(r1); \
bl enter_vmx_ops; \
cmpwi cr1,r3,0; \
ld r0,STACKFRAMESIZE+16(r1); \
ld r3,STK_REG(R31)(r1); \
ld r4,STK_REG(R30)(r1); \
ld r5,STK_REG(R29)(r1); \
addi r1,r1,STACKFRAMESIZE; \
mtlr r0
/* Mirror image of ENTER_VMX_OPS: calls exit_vmx_ops and restores
 * r3-r5 and LR.  Note: does not preserve CR (see the mfocrf/mtocrf
 * dance at one call site below). */
#define EXIT_VMX_OPS \
mflr r0; \
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
std r0,16(r1); \
stdu r1,-STACKFRAMESIZE(r1); \
bl exit_vmx_ops; \
ld r0,STACKFRAMESIZE+16(r1); \
ld r3,STK_REG(R31)(r1); \
ld r4,STK_REG(R30)(r1); \
ld r5,STK_REG(R29)(r1); \
addi r1,r1,STACKFRAMESIZE; \
mtlr r0
/*
 * LD_VSR_CROSS16B load the 2nd 16 bytes for _vaddr which is unaligned with
 * 16 bytes boundary and permute the result with the 1st 16 bytes.
 * | y y y y y y y y y y y y y 0 1 2 | 3 4 5 6 7 8 9 a b c d e f z z z |
 * ^ ^ ^
 * 0xbbbb10 0xbbbb20 0xbbb30
 * ^
 * _vaddr
 *
 *
 * _vmask is the mask generated by LVS
 * _v1st_qw is the 1st aligned QW of current addr which is already loaded.
 * for example: 0xyyyyyyyyyyyyy012 for big endian
 * _v2nd_qw is the 2nd aligned QW of cur _vaddr to be loaded.
 * for example: 0x3456789abcdefzzz for big endian
 * The permute result is saved in _v_res.
 * for example: 0x0123456789abcdef for big endian.
 */
#define LD_VSR_CROSS16B(_vaddr,_vmask,_v1st_qw,_v2nd_qw,_v_res) \
lvx _v2nd_qw,_vaddr,off16; \
VPERM(_v_res,_v1st_qw,_v2nd_qw,_vmask)
/*
* There are 2 categories for memcmp:
* 1) src/dst has the same offset to the 8 bytes boundary. The handlers
* are named like .Lsameoffset_xxxx
* 2) src/dst has different offset to the 8 bytes boundary. The handlers
* are named like .Ldiffoffset_xxxx
*/
/*
 * int memcmp(const void *s1, const void *s2, size_t n)
 * In:  r3 = s1, r4 = s2, r5 = n.
 * Out: r3 = 0 if equal, 1 if s1 > s2, -1 if s1 < s2 (unsigned compare
 *      of the first differing chunk — see .LcmpAB* below).
 * Register aliases rA..rH / off8/off16/off24 are #defined above; rD-rH
 * live in non-volatile r27-r31, saved/restored around .Llong.
 */
_GLOBAL_TOC(memcmp)
	cmpdi cr1,r5,0
	/* Use the short loop if the src/dst addresses are not
	 * with the same offset of 8 bytes align boundary.
	 */
	xor r6,r3,r4
	andi. r6,r6,7
	/* Fall back to short loop if compare at aligned addrs
	 * with less than 8 bytes.
	 */
	cmpdi cr6,r5,7
	beq cr1,.Lzero
	bgt cr6,.Lno_short
.Lshort:
	/* byte-by-byte compare, 4 bytes unrolled per iteration */
	mtctr r5
1: lbz rA,0(r3)
	lbz rB,0(r4)
	subf. rC,rB,rA
	bne .Lnon_zero
	bdz .Lzero
	lbz rA,1(r3)
	lbz rB,1(r4)
	subf. rC,rB,rA
	bne .Lnon_zero
	bdz .Lzero
	lbz rA,2(r3)
	lbz rB,2(r4)
	subf. rC,rB,rA
	bne .Lnon_zero
	bdz .Lzero
	lbz rA,3(r3)
	lbz rB,3(r4)
	subf. rC,rB,rA
	bne .Lnon_zero
	addi r3,r3,4
	addi r4,r4,4
	bdnz 1b
.Lzero:
	li r3,0
	blr
.Lno_short:
	dcbt 0,r3
	dcbt 0,r4
	bne .Ldiffoffset_8bytes_make_align_start
.Lsameoffset_8bytes_make_align_start:
	/* attempt to compare bytes not aligned with 8 bytes so that
	 * rest comparison can run based on 8 bytes alignment.
	 */
	andi. r6,r3,7
	/* Try to compare the first double word which is not 8 bytes aligned:
	 * load the first double word at (src & ~7UL) and shift left appropriate
	 * bits before comparision.
	 */
	rlwinm r6,r3,3,26,28	/* r6 = (r3 & 7) * 8 = leading bit count */
	beq .Lsameoffset_8bytes_aligned
	clrrdi r3,r3,3
	clrrdi r4,r4,3
	LD rA,0,r3
	LD rB,0,r4
	sld rA,rA,r6		/* discard the bytes before the buffers */
	sld rB,rB,r6
	cmpld cr0,rA,rB
	srwi r6,r6,3		/* back to a byte count */
	bne cr0,.LcmpAB_lightweight
	subfic r6,r6,8
	subf. r5,r6,r5
	addi r3,r3,8
	addi r4,r4,8
	beq .Lzero
.Lsameoffset_8bytes_aligned:
	/* now we are aligned with 8 bytes.
	 * Use .Llong loop if left cmp bytes are equal or greater than 32B.
	 */
	cmpdi cr6,r5,31
	bgt cr6,.Llong
.Lcmp_lt32bytes:
	/* compare 1 ~ 31 bytes, at least r3 addr is 8 bytes aligned now */
	cmpdi cr5,r5,7
	srdi r0,r5,3
	ble cr5,.Lcmp_rest_lt8bytes
	/* handle 8 ~ 31 bytes */
	clrldi r5,r5,61
	mtctr r0
2:
	LD rA,0,r3
	LD rB,0,r4
	cmpld cr0,rA,rB
	addi r3,r3,8
	addi r4,r4,8
	bne cr0,.LcmpAB_lightweight
	bdnz 2b
	cmpwi r5,0
	beq .Lzero
.Lcmp_rest_lt8bytes:
	/*
	 * Here we have less than 8 bytes to compare. At least s1 is aligned to
	 * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
	 * page boundary, otherwise we might read past the end of the buffer and
	 * trigger a page fault. We use 4K as the conservative minimum page
	 * size. If we detect that case we go to the byte-by-byte loop.
	 *
	 * Otherwise the next double word is loaded from s1 and s2, and shifted
	 * right to compare the appropriate bits.
	 */
	clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
	cmpdi r6,0xff8
	bgt .Lshort
	subfic r6,r5,8
	slwi r6,r6,3		/* trailing (8 - n) bytes, in bits */
	LD rA,0,r3
	LD rB,0,r4
	srd rA,rA,r6		/* discard the bytes past the end */
	srd rB,rB,r6
	cmpld cr0,rA,rB
	bne cr0,.LcmpAB_lightweight
	b .Lzero
.Lnon_zero:
	/* byte loop found a difference: rC already holds rA - rB */
	mr r3,rC
	blr
.Llong:
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	/* Try to use vmx loop if length is equal or greater than 4K */
	cmpldi cr6,r5,VMX_THRESH
	bge cr6,.Lsameoffset_vmx_cmp
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
.Llong_novmx_cmp:
#endif
	/* At least s1 addr is aligned with 8 bytes */
	li off8,8
	li off16,16
	li off24,24
	/* rD-rH are r27-r31: save the non-volatiles we use */
	std r31,-8(r1)
	std r30,-16(r1)
	std r29,-24(r1)
	std r28,-32(r1)
	std r27,-40(r1)
	srdi r0,r5,5		/* ctr = number of 32-byte chunks */
	mtctr r0
	andi. r5,r5,31		/* r5 = leftover bytes for .Lshort */
	/* Software-pipelined loop: each iteration loads the next 32 bytes
	 * while the compares (into cr0/cr1/cr6/cr7) of the previous 32
	 * bytes are still in flight. */
	LD rA,0,r3
	LD rB,0,r4
	LD rC,off8,r3
	LD rD,off8,r4
	LD rE,off16,r3
	LD rF,off16,r4
	LD rG,off24,r3
	LD rH,off24,r4
	cmpld cr0,rA,rB
	addi r3,r3,32
	addi r4,r4,32
	bdz .Lfirst32
	LD rA,0,r3
	LD rB,0,r4
	cmpld cr1,rC,rD
	LD rC,off8,r3
	LD rD,off8,r4
	cmpld cr6,rE,rF
	LD rE,off16,r3
	LD rF,off16,r4
	cmpld cr7,rG,rH
	bne cr0,.LcmpAB
	LD rG,off24,r3
	LD rH,off24,r4
	cmpld cr0,rA,rB
	bne cr1,.LcmpCD
	addi r3,r3,32
	addi r4,r4,32
	bdz .Lsecond32
	.balign 16
1: LD rA,0,r3
	LD rB,0,r4
	cmpld cr1,rC,rD
	bne cr6,.LcmpEF
	LD rC,off8,r3
	LD rD,off8,r4
	cmpld cr6,rE,rF
	bne cr7,.LcmpGH
	LD rE,off16,r3
	LD rF,off16,r4
	cmpld cr7,rG,rH
	bne cr0,.LcmpAB
	LD rG,off24,r3
	LD rH,off24,r4
	cmpld cr0,rA,rB
	bne cr1,.LcmpCD
	addi r3,r3,32
	addi r4,r4,32
	bdnz 1b
.Lsecond32:
	/* drain the pipeline: finish the outstanding compares */
	cmpld cr1,rC,rD
	bne cr6,.LcmpEF
	cmpld cr6,rE,rF
	bne cr7,.LcmpGH
	cmpld cr7,rG,rH
	bne cr0,.LcmpAB
	bne cr1,.LcmpCD
	bne cr6,.LcmpEF
	bne cr7,.LcmpGH
.Ltail:
	ld r31,-8(r1)
	ld r30,-16(r1)
	ld r29,-24(r1)
	ld r28,-32(r1)
	ld r27,-40(r1)
	cmpdi r5,0
	beq .Lzero
	b .Lshort
.Lfirst32:
	/* only one 32-byte chunk: finish its four compares */
	cmpld cr1,rC,rD
	cmpld cr6,rE,rF
	cmpld cr7,rG,rH
	bne cr0,.LcmpAB
	bne cr1,.LcmpCD
	bne cr6,.LcmpEF
	bne cr7,.LcmpGH
	b .Ltail
.LcmpAB:
	li r3,1
	bgt cr0,.Lout
	li r3,-1
	b .Lout
.LcmpCD:
	li r3,1
	bgt cr1,.Lout
	li r3,-1
	b .Lout
.LcmpEF:
	li r3,1
	bgt cr6,.Lout
	li r3,-1
	b .Lout
.LcmpGH:
	li r3,1
	bgt cr7,.Lout
	li r3,-1
.Lout:
	ld r31,-8(r1)
	ld r30,-16(r1)
	ld r29,-24(r1)
	ld r28,-32(r1)
	ld r27,-40(r1)
	blr
.LcmpAB_lightweight: /* skip NV GPRS restore */
	li r3,1
	bgtlr
	li r3,-1
	blr
#ifdef CONFIG_ALTIVEC
.Lsameoffset_vmx_cmp:
	/* Enter with src/dst addrs has the same offset with 8 bytes
	 * align boundary.
	 *
	 * There is an optimization based on following fact: memcmp()
	 * prones to fail early at the first 32 bytes.
	 * Before applying VMX instructions which will lead to 32x128bits
	 * VMX regs load/restore penalty, we compare the first 32 bytes
	 * so that we can catch the ~80% fail cases.
	 */
	li r0,4
	mtctr r0
.Lsameoffset_prechk_32B_loop:
	LD rA,0,r3
	LD rB,0,r4
	cmpld cr0,rA,rB
	addi r3,r3,8
	addi r4,r4,8
	bne cr0,.LcmpAB_lightweight
	addi r5,r5,-8
	bdnz .Lsameoffset_prechk_32B_loop
	ENTER_VMX_OPS
	beq cr1,.Llong_novmx_cmp	/* VMX unavailable: fall back */
3:
	/* need to check whether r4 has the same offset with r3
	 * for 16 bytes boundary.
	 */
	xor r0,r3,r4
	andi. r0,r0,0xf
	bne .Ldiffoffset_vmx_cmp_start
	/* len is no less than 4KB. Need to align with 16 bytes further.
	 */
	andi. rA,r3,8
	LD rA,0,r3
	beq 4f
	LD rB,0,r4
	cmpld cr0,rA,rB
	addi r3,r3,8
	addi r4,r4,8
	addi r5,r5,-8
	beq cr0,4f
	/* save and restore cr0 */
	mfocrf r5,128
	EXIT_VMX_OPS
	mtocrf 128,r5
	b .LcmpAB_lightweight
4:
	/* compare 32 bytes for each loop */
	srdi r0,r5,5
	mtctr r0
	clrldi r5,r5,59		/* r5 = leftover bytes (< 32) */
	li off16,16
	.balign 16
5:
	lvx v0,0,r3
	lvx v1,0,r4
	VCMPEQUD_RC(v0,v0,v1)
	bnl cr6,7f		/* not all lanes equal: diff in 1st QW */
	lvx v0,off16,r3
	lvx v1,off16,r4
	VCMPEQUD_RC(v0,v0,v1)
	bnl cr6,6f		/* diff in 2nd QW */
	addi r3,r3,32
	addi r4,r4,32
	bdnz 5b
	EXIT_VMX_OPS
	cmpdi r5,0
	beq .Lzero
	b .Lcmp_lt32bytes
6:
	addi r3,r3,16
	addi r4,r4,16
7:
	/* diff the last 16 bytes */
	EXIT_VMX_OPS
	LD rA,0,r3
	LD rB,0,r4
	cmpld cr0,rA,rB
	li off8,8
	bne cr0,.LcmpAB_lightweight
	LD rA,off8,r3
	LD rB,off8,r4
	cmpld cr0,rA,rB
	bne cr0,.LcmpAB_lightweight
	b .Lzero
#endif
.Ldiffoffset_8bytes_make_align_start:
	/* now try to align s1 with 8 bytes */
	rlwinm r6,r3,3,26,28	/* r6 = (r3 & 7) * 8 */
	beq .Ldiffoffset_align_s1_8bytes
	clrrdi r3,r3,3
	LD rA,0,r3
	LD rB,0,r4 /* unaligned load */
	sld rA,rA,r6		/* zero the bytes before s1 ... */
	srd rA,rA,r6		/* ... then shift back in place */
	srd rB,rB,r6		/* keep only the matching byte count of s2 */
	cmpld cr0,rA,rB
	srwi r6,r6,3
	bne cr0,.LcmpAB_lightweight
	subfic r6,r6,8
	subf. r5,r6,r5
	addi r3,r3,8
	add r4,r4,r6
	beq .Lzero
.Ldiffoffset_align_s1_8bytes:
	/* now s1 is aligned with 8 bytes. */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	/* only do vmx ops when the size equal or greater than 4K bytes */
	cmpdi cr5,r5,VMX_THRESH
	bge cr5,.Ldiffoffset_vmx_cmp
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
.Ldiffoffset_novmx_cmp:
#endif
	cmpdi cr5,r5,31
	ble cr5,.Lcmp_lt32bytes
#ifdef CONFIG_ALTIVEC
	b .Llong_novmx_cmp
#else
	b .Llong
#endif
#ifdef CONFIG_ALTIVEC
.Ldiffoffset_vmx_cmp:
	/* perform a 32 bytes pre-checking before
	 * enable VMX operations.
	 */
	li r0,4
	mtctr r0
.Ldiffoffset_prechk_32B_loop:
	LD rA,0,r3
	LD rB,0,r4
	cmpld cr0,rA,rB
	addi r3,r3,8
	addi r4,r4,8
	bne cr0,.LcmpAB_lightweight
	addi r5,r5,-8
	bdnz .Ldiffoffset_prechk_32B_loop
	ENTER_VMX_OPS
	beq cr1,.Ldiffoffset_novmx_cmp
.Ldiffoffset_vmx_cmp_start:
	/* Firstly try to align r3 with 16 bytes */
	andi. r6,r3,0xf
	li off16,16
	beq .Ldiffoffset_vmx_s1_16bytes_align
	/* unaligned heads: assemble one QW from each side with
	 * LD_VSR_CROSS16B and compare */
	LVS v3,0,r3
	LVS v4,0,r4
	lvx v5,0,r3
	lvx v6,0,r4
	LD_VSR_CROSS16B(r3,v3,v5,v7,v9)
	LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
	VCMPEQUB_RC(v7,v9,v10)
	bnl cr6,.Ldiffoffset_vmx_diff_found
	subfic r6,r6,16
	subf r5,r6,r5
	add r3,r3,r6
	add r4,r4,r6
.Ldiffoffset_vmx_s1_16bytes_align:
	/* now s1 is aligned with 16 bytes */
	lvx v6,0,r4
	LVS v4,0,r4
	srdi r6,r5,5 /* loop for 32 bytes each */
	clrldi r5,r5,59
	mtctr r6
	.balign 16
.Ldiffoffset_vmx_32bytesloop:
	/* the first qw of r4 was saved in v6 */
	lvx v9,0,r3
	LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
	VCMPEQUB_RC(v7,v9,v10)
	vor v6,v8,v8		/* carry the 2nd QW over as next iter's 1st */
	bnl cr6,.Ldiffoffset_vmx_diff_found
	addi r3,r3,16
	addi r4,r4,16
	lvx v9,0,r3
	LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
	VCMPEQUB_RC(v7,v9,v10)
	vor v6,v8,v8
	bnl cr6,.Ldiffoffset_vmx_diff_found
	addi r3,r3,16
	addi r4,r4,16
	bdnz .Ldiffoffset_vmx_32bytesloop
	EXIT_VMX_OPS
	cmpdi r5,0
	beq .Lzero
	b .Lcmp_lt32bytes
.Ldiffoffset_vmx_diff_found:
	EXIT_VMX_OPS
	/* anyway, the diff will appear in next 16 bytes */
	li r5,16
	b .Lcmp_lt32bytes
#endif
EXPORT_SYMBOL(memcmp)
|
aixcc-public/challenge-001-exemplar-source
| 1,300
|
tools/testing/selftests/powerpc/pmu/loop.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
*/
#include <ppc-asm.h>
.text
/*
 * void thirty_two_instruction_loop(u64 count)
 * PMU test helper: executes r3 iterations of a loop that is exactly 32
 * instructions long (cmpdi + beqlr + 28 addi + subi + b), so instruction
 * counts are predictable.  Do not add/remove instructions here.
 * In: r3 = iteration count.  r4 is scratch.
 */
FUNC_START(thirty_two_instruction_loop)
	cmpdi r3,0
	beqlr
	addi r4,r3,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1
	addi r4,r4,1 # 28 addi's
	subi r3,r3,1
	b FUNC_NAME(thirty_two_instruction_loop)
FUNC_END(thirty_two_instruction_loop)
/*
 * void thirty_two_instruction_loop_with_ll_sc(u64 count, u64 *addr)
 * Like the loop above but with a ldarx/stdcx. (load-linked/store-
 * conditional) pair embedded mid-loop, for PMU tests that observe
 * larx/stcx events.  Instruction positions are deliberate; the
 * "# N" comments number instructions within the loop body.
 * In: r3 = iteration count, r4 = address for ldarx/stdcx.  r5, r6 scratch.
 */
FUNC_START(thirty_two_instruction_loop_with_ll_sc)
	cmpdi r3,0
	beqlr
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1 # 5
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1
1: ldarx r6,0,r4 # 10
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1 # 15
	addi r5,r5,1
	addi r5,r5,1
	stdcx. r6,0,r4
	bne- 1b		/* reservation lost: retry the ll/sc pair */
	addi r5,r5,1 # 20
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1 # 25
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1
	addi r5,r5,1 # 30
	subi r3,r3,1
	b FUNC_NAME(thirty_two_instruction_loop_with_ll_sc)
FUNC_END(thirty_two_instruction_loop_with_ll_sc)
|
aixcc-public/challenge-001-exemplar-source
| 6,347
|
tools/testing/selftests/powerpc/lib/reg.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* test helper assembly functions
*
* Copyright (C) 2016 Simon Guo, IBM Corporation.
*/
#include <ppc-asm.h>
#include "reg.h"
/* Non volatile GPR - unsigned long buf[18] */
/*
 * void load_gpr(unsigned long buf[18])
 * Loads the non-volatile GPRs r14-r31 from buf[0..17].
 * In: r3 = buf.
 */
FUNC_START(load_gpr)
	ld 14, 0*8(3)
	ld 15, 1*8(3)
	ld 16, 2*8(3)
	ld 17, 3*8(3)
	ld 18, 4*8(3)
	ld 19, 5*8(3)
	ld 20, 6*8(3)
	ld 21, 7*8(3)
	ld 22, 8*8(3)
	ld 23, 9*8(3)
	ld 24, 10*8(3)
	ld 25, 11*8(3)
	ld 26, 12*8(3)
	ld 27, 13*8(3)
	ld 28, 14*8(3)
	ld 29, 15*8(3)
	ld 30, 16*8(3)
	ld 31, 17*8(3)
	blr
FUNC_END(load_gpr)
/*
 * void store_gpr(unsigned long buf[18])
 * Stores the non-volatile GPRs r14-r31 into buf[0..17]
 * (inverse of load_gpr above).
 * In: r3 = buf.
 */
FUNC_START(store_gpr)
	std 14, 0*8(3)
	std 15, 1*8(3)
	std 16, 2*8(3)
	std 17, 3*8(3)
	std 18, 4*8(3)
	std 19, 5*8(3)
	std 20, 6*8(3)
	std 21, 7*8(3)
	std 22, 8*8(3)
	std 23, 9*8(3)
	std 24, 10*8(3)
	std 25, 11*8(3)
	std 26, 12*8(3)
	std 27, 13*8(3)
	std 28, 14*8(3)
	std 29, 15*8(3)
	std 30, 16*8(3)
	std 31, 17*8(3)
	blr
FUNC_END(store_gpr)
/* Double Precision Float - double buf[32] */
/*
 * void store_fpr(double buf[32])
 * Stores all 32 double-precision FP registers f0-f31 into buf[0..31].
 * In: r3 = buf.
 */
FUNC_START(store_fpr)
	stfd 0, 0*8(3)
	stfd 1, 1*8(3)
	stfd 2, 2*8(3)
	stfd 3, 3*8(3)
	stfd 4, 4*8(3)
	stfd 5, 5*8(3)
	stfd 6, 6*8(3)
	stfd 7, 7*8(3)
	stfd 8, 8*8(3)
	stfd 9, 9*8(3)
	stfd 10, 10*8(3)
	stfd 11, 11*8(3)
	stfd 12, 12*8(3)
	stfd 13, 13*8(3)
	stfd 14, 14*8(3)
	stfd 15, 15*8(3)
	stfd 16, 16*8(3)
	stfd 17, 17*8(3)
	stfd 18, 18*8(3)
	stfd 19, 19*8(3)
	stfd 20, 20*8(3)
	stfd 21, 21*8(3)
	stfd 22, 22*8(3)
	stfd 23, 23*8(3)
	stfd 24, 24*8(3)
	stfd 25, 25*8(3)
	stfd 26, 26*8(3)
	stfd 27, 27*8(3)
	stfd 28, 28*8(3)
	stfd 29, 29*8(3)
	stfd 30, 30*8(3)
	stfd 31, 31*8(3)
	blr
FUNC_END(store_fpr)
/* VMX/VSX registers - unsigned long buf[128] */
/*
 * void loadvsx(unsigned long buf[128])
 * Loads all 64 VSX registers vsr0-vsr63 (16 bytes each) from buf.
 * In: r3 = buf base; r4 walks the offset in 16-byte steps.
 * LXVD2X is a macro (reg, RA=offset, RB=base) — see reg.h.
 */
FUNC_START(loadvsx)
	lis 4, 0
	LXVD2X (0,(4),(3))
	addi 4, 4, 16
	LXVD2X (1,(4),(3))
	addi 4, 4, 16
	LXVD2X (2,(4),(3))
	addi 4, 4, 16
	LXVD2X (3,(4),(3))
	addi 4, 4, 16
	LXVD2X (4,(4),(3))
	addi 4, 4, 16
	LXVD2X (5,(4),(3))
	addi 4, 4, 16
	LXVD2X (6,(4),(3))
	addi 4, 4, 16
	LXVD2X (7,(4),(3))
	addi 4, 4, 16
	LXVD2X (8,(4),(3))
	addi 4, 4, 16
	LXVD2X (9,(4),(3))
	addi 4, 4, 16
	LXVD2X (10,(4),(3))
	addi 4, 4, 16
	LXVD2X (11,(4),(3))
	addi 4, 4, 16
	LXVD2X (12,(4),(3))
	addi 4, 4, 16
	LXVD2X (13,(4),(3))
	addi 4, 4, 16
	LXVD2X (14,(4),(3))
	addi 4, 4, 16
	LXVD2X (15,(4),(3))
	addi 4, 4, 16
	LXVD2X (16,(4),(3))
	addi 4, 4, 16
	LXVD2X (17,(4),(3))
	addi 4, 4, 16
	LXVD2X (18,(4),(3))
	addi 4, 4, 16
	LXVD2X (19,(4),(3))
	addi 4, 4, 16
	LXVD2X (20,(4),(3))
	addi 4, 4, 16
	LXVD2X (21,(4),(3))
	addi 4, 4, 16
	LXVD2X (22,(4),(3))
	addi 4, 4, 16
	LXVD2X (23,(4),(3))
	addi 4, 4, 16
	LXVD2X (24,(4),(3))
	addi 4, 4, 16
	LXVD2X (25,(4),(3))
	addi 4, 4, 16
	LXVD2X (26,(4),(3))
	addi 4, 4, 16
	LXVD2X (27,(4),(3))
	addi 4, 4, 16
	LXVD2X (28,(4),(3))
	addi 4, 4, 16
	LXVD2X (29,(4),(3))
	addi 4, 4, 16
	LXVD2X (30,(4),(3))
	addi 4, 4, 16
	LXVD2X (31,(4),(3))
	addi 4, 4, 16
	LXVD2X (32,(4),(3))
	addi 4, 4, 16
	LXVD2X (33,(4),(3))
	addi 4, 4, 16
	LXVD2X (34,(4),(3))
	addi 4, 4, 16
	LXVD2X (35,(4),(3))
	addi 4, 4, 16
	LXVD2X (36,(4),(3))
	addi 4, 4, 16
	LXVD2X (37,(4),(3))
	addi 4, 4, 16
	LXVD2X (38,(4),(3))
	addi 4, 4, 16
	LXVD2X (39,(4),(3))
	addi 4, 4, 16
	LXVD2X (40,(4),(3))
	addi 4, 4, 16
	LXVD2X (41,(4),(3))
	addi 4, 4, 16
	LXVD2X (42,(4),(3))
	addi 4, 4, 16
	LXVD2X (43,(4),(3))
	addi 4, 4, 16
	LXVD2X (44,(4),(3))
	addi 4, 4, 16
	LXVD2X (45,(4),(3))
	addi 4, 4, 16
	LXVD2X (46,(4),(3))
	addi 4, 4, 16
	LXVD2X (47,(4),(3))
	addi 4, 4, 16
	LXVD2X (48,(4),(3))
	addi 4, 4, 16
	LXVD2X (49,(4),(3))
	addi 4, 4, 16
	LXVD2X (50,(4),(3))
	addi 4, 4, 16
	LXVD2X (51,(4),(3))
	addi 4, 4, 16
	LXVD2X (52,(4),(3))
	addi 4, 4, 16
	LXVD2X (53,(4),(3))
	addi 4, 4, 16
	LXVD2X (54,(4),(3))
	addi 4, 4, 16
	LXVD2X (55,(4),(3))
	addi 4, 4, 16
	LXVD2X (56,(4),(3))
	addi 4, 4, 16
	LXVD2X (57,(4),(3))
	addi 4, 4, 16
	LXVD2X (58,(4),(3))
	addi 4, 4, 16
	LXVD2X (59,(4),(3))
	addi 4, 4, 16
	LXVD2X (60,(4),(3))
	addi 4, 4, 16
	LXVD2X (61,(4),(3))
	addi 4, 4, 16
	LXVD2X (62,(4),(3))
	addi 4, 4, 16
	LXVD2X (63,(4),(3))
	blr
FUNC_END(loadvsx)
/*
 * void storevsx(unsigned long buf[128])
 * Stores all 64 VSX registers vsr0-vsr63 (16 bytes each) into buf
 * (inverse of loadvsx above).
 * In: r3 = buf base; r4 walks the offset in 16-byte steps.
 */
FUNC_START(storevsx)
	lis 4, 0
	STXVD2X (0,(4),(3))
	addi 4, 4, 16
	STXVD2X (1,(4),(3))
	addi 4, 4, 16
	STXVD2X (2,(4),(3))
	addi 4, 4, 16
	STXVD2X (3,(4),(3))
	addi 4, 4, 16
	STXVD2X (4,(4),(3))
	addi 4, 4, 16
	STXVD2X (5,(4),(3))
	addi 4, 4, 16
	STXVD2X (6,(4),(3))
	addi 4, 4, 16
	STXVD2X (7,(4),(3))
	addi 4, 4, 16
	STXVD2X (8,(4),(3))
	addi 4, 4, 16
	STXVD2X (9,(4),(3))
	addi 4, 4, 16
	STXVD2X (10,(4),(3))
	addi 4, 4, 16
	STXVD2X (11,(4),(3))
	addi 4, 4, 16
	STXVD2X (12,(4),(3))
	addi 4, 4, 16
	STXVD2X (13,(4),(3))
	addi 4, 4, 16
	STXVD2X (14,(4),(3))
	addi 4, 4, 16
	STXVD2X (15,(4),(3))
	addi 4, 4, 16
	STXVD2X (16,(4),(3))
	addi 4, 4, 16
	STXVD2X (17,(4),(3))
	addi 4, 4, 16
	STXVD2X (18,(4),(3))
	addi 4, 4, 16
	STXVD2X (19,(4),(3))
	addi 4, 4, 16
	STXVD2X (20,(4),(3))
	addi 4, 4, 16
	STXVD2X (21,(4),(3))
	addi 4, 4, 16
	STXVD2X (22,(4),(3))
	addi 4, 4, 16
	STXVD2X (23,(4),(3))
	addi 4, 4, 16
	STXVD2X (24,(4),(3))
	addi 4, 4, 16
	STXVD2X (25,(4),(3))
	addi 4, 4, 16
	STXVD2X (26,(4),(3))
	addi 4, 4, 16
	STXVD2X (27,(4),(3))
	addi 4, 4, 16
	STXVD2X (28,(4),(3))
	addi 4, 4, 16
	STXVD2X (29,(4),(3))
	addi 4, 4, 16
	STXVD2X (30,(4),(3))
	addi 4, 4, 16
	STXVD2X (31,(4),(3))
	addi 4, 4, 16
	STXVD2X (32,(4),(3))
	addi 4, 4, 16
	STXVD2X (33,(4),(3))
	addi 4, 4, 16
	STXVD2X (34,(4),(3))
	addi 4, 4, 16
	STXVD2X (35,(4),(3))
	addi 4, 4, 16
	STXVD2X (36,(4),(3))
	addi 4, 4, 16
	STXVD2X (37,(4),(3))
	addi 4, 4, 16
	STXVD2X (38,(4),(3))
	addi 4, 4, 16
	STXVD2X (39,(4),(3))
	addi 4, 4, 16
	STXVD2X (40,(4),(3))
	addi 4, 4, 16
	STXVD2X (41,(4),(3))
	addi 4, 4, 16
	STXVD2X (42,(4),(3))
	addi 4, 4, 16
	STXVD2X (43,(4),(3))
	addi 4, 4, 16
	STXVD2X (44,(4),(3))
	addi 4, 4, 16
	STXVD2X (45,(4),(3))
	addi 4, 4, 16
	STXVD2X (46,(4),(3))
	addi 4, 4, 16
	STXVD2X (47,(4),(3))
	addi 4, 4, 16
	STXVD2X (48,(4),(3))
	addi 4, 4, 16
	STXVD2X (49,(4),(3))
	addi 4, 4, 16
	STXVD2X (50,(4),(3))
	addi 4, 4, 16
	STXVD2X (51,(4),(3))
	addi 4, 4, 16
	STXVD2X (52,(4),(3))
	addi 4, 4, 16
	STXVD2X (53,(4),(3))
	addi 4, 4, 16
	STXVD2X (54,(4),(3))
	addi 4, 4, 16
	STXVD2X (55,(4),(3))
	addi 4, 4, 16
	STXVD2X (56,(4),(3))
	addi 4, 4, 16
	STXVD2X (57,(4),(3))
	addi 4, 4, 16
	STXVD2X (58,(4),(3))
	addi 4, 4, 16
	STXVD2X (59,(4),(3))
	addi 4, 4, 16
	STXVD2X (60,(4),(3))
	addi 4, 4, 16
	STXVD2X (61,(4),(3))
	addi 4, 4, 16
	STXVD2X (62,(4),(3))
	addi 4, 4, 16
	STXVD2X (63,(4),(3))
	blr
FUNC_END(storevsx)
|
aixcc-public/challenge-001-exemplar-source
| 1,704
|
tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include "common.h"
.data
.balign 8
success_message:
.ascii "success: switch_endian_test\n\0"
.balign 8
failure_message:
.ascii "failure: switch_endian_test\n\0"
.section ".toc"
.balign 8
pattern:
.8byte 0x5555AAAA5555AAAA
.text
FUNC_START(_start)
/*
 * Entry point: seed most GPRs, LR and CR2-CR4 with values derived from
 * the pattern, switch endianness via sys_switch_endian, verify nothing
 * was clobbered, flip back and verify again. Exit status in r14:
 * 0 = success, 1 = failure.
 */
/* Load the pattern */
ld r15, pattern@TOC(%r2)
/* Setup CR, only CR2-CR4 are maintained */
lis r3, 0x00FF
ori r3, r3, 0xF000
mtcr r3
/* Load the pattern slightly modified into the registers */
mr r3, r15
addi r4, r15, 4
/* LR must survive the switch; seed it with pattern + 32 */
addi r5, r15, 32
mtlr r5
addi r5, r15, 5
addi r6, r15, 6
addi r7, r15, 7
addi r8, r15, 8
/* r9 - r12 are clobbered */
addi r13, r15, 13
addi r14, r15, 14
/* Skip r15 we're using it */
addi r16, r15, 16
addi r17, r15, 17
addi r18, r15, 18
addi r19, r15, 19
addi r20, r15, 20
addi r21, r15, 21
addi r22, r15, 22
addi r23, r15, 23
addi r24, r15, 24
addi r25, r15, 25
addi r26, r15, 26
addi r27, r15, 27
addi r28, r15, 28
addi r29, r15, 29
addi r30, r15, 30
addi r31, r15, 31
/*
 * Call the syscall to switch endian.
 * It clobbers r9-r12, XER, CTR and CR0-1,5-7.
 */
li r0, __NR_switch_endian
sc
/*
 * The tdi below never traps as written; read in the opposite
 * endianness its bytes decode to a taken branch over the next
 * instruction, so reaching the "b .Lfail" means the switch failed.
 */
tdi 0, 0, 0x48 // b +8 if the endian was switched
b .Lfail // exit if endian didn't switch
#include "check-reversed.S"
/* Flip back, r0 already has the switch syscall number */
.long 0x02000044 /* sc */
#include "check.S"
ld r4, success_message@got(%r2)
li r5, 28 // strlen(success_message)
li r14, 0 // exit status
.Lout:
/* write(1, message, 28) followed by exit(r14) */
li r0, __NR_write
li r3, 1 /* stdout */
sc
li r0, __NR_exit
mr r3, r14
sc
b .
.Lfail:
ld r4, failure_message@got(%r2)
li r5, 28 // strlen(failure_message)
li r14, 1
b .Lout
|
aixcc-public/challenge-001-exemplar-source
| 1,977
|
tools/testing/selftests/powerpc/switch_endian/check.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include "common.h"
/*
* Checks that registers contain what we expect, ie. they were not clobbered by
* the syscall.
*
* r15: pattern to check registers against.
*
* At the end r3 == 0 if everything's OK.
*/
# Included (not called) after each endian switch; r9/r10 are scratch.
# On any mismatch, branches to 1: and exits with the bad value in r3.
nop # guaranteed to be illegal in reverse-endian
mr r9,r15
cmpd r9,r3 # check r3
bne 1f
addi r9,r15,4 # check r4
cmpd r9,r4
bne 1f
lis r9,0x00FF # check CR
ori r9,r9,0xF000
mfcr r10
and r10,r10,r9
cmpw r9,r10
# Pre-load the CR-mismatch error code (pattern + 34) before the branch;
# addi does not alter CR0 so the cmpw result is still live here.
addi r9,r15,34
bne 1f
addi r9,r15,32 # check LR
mflr r10
cmpd r9,r10
bne 1f
addi r9,r15,5 # check r5
cmpd r9,r5
bne 1f
addi r9,r15,6 # check r6
cmpd r9,r6
bne 1f
addi r9,r15,7 # check r7
cmpd r9,r7
bne 1f
addi r9,r15,8 # check r8
cmpd r9,r8
bne 1f
addi r9,r15,13 # check r13
cmpd r9,r13
bne 1f
addi r9,r15,14 # check r14
cmpd r9,r14
bne 1f
addi r9,r15,16 # check r16
cmpd r9,r16
bne 1f
addi r9,r15,17 # check r17
cmpd r9,r17
bne 1f
addi r9,r15,18 # check r18
cmpd r9,r18
bne 1f
addi r9,r15,19 # check r19
cmpd r9,r19
bne 1f
addi r9,r15,20 # check r20
cmpd r9,r20
bne 1f
addi r9,r15,21 # check r21
cmpd r9,r21
bne 1f
addi r9,r15,22 # check r22
cmpd r9,r22
bne 1f
addi r9,r15,23 # check r23
cmpd r9,r23
bne 1f
addi r9,r15,24 # check r24
cmpd r9,r24
bne 1f
addi r9,r15,25 # check r25
cmpd r9,r25
bne 1f
addi r9,r15,26 # check r26
cmpd r9,r26
bne 1f
addi r9,r15,27 # check r27
cmpd r9,r27
bne 1f
addi r9,r15,28 # check r28
cmpd r9,r28
bne 1f
addi r9,r15,29 # check r29
cmpd r9,r29
bne 1f
addi r9,r15,30 # check r30
cmpd r9,r30
bne 1f
addi r9,r15,31 # check r31
cmpd r9,r31
bne 1f
b 2f
# Failure path: exit immediately with the mismatching check value.
1: mr r3, r9
li r0, __NR_exit
sc
# Success: pre-load r0 for the following endian switch; the trailing nop
# keeps the instruction after this fragment 8-byte offset consistent.
2: li r0, __NR_switch_endian
nop
|
aixcc-public/challenge-001-exemplar-source
| 1,067
|
tools/testing/selftests/powerpc/security/branch_loops.S
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2019, Michael Ellerman, IBM Corp.
*/
#include <ppc-asm.h>
.data
jump_table:
.long 0x0
.long (.Lstate_1 - .Lstate_0)
.long (.Lstate_2 - .Lstate_0)
.long (.Lstate_3 - .Lstate_0)
.long (.Lstate_4 - .Lstate_0)
.long (.Lstate_5 - .Lstate_0)
.long (.Lstate_6 - .Lstate_0)
.long (.Lstate_7 - .Lstate_0)
.text
#define ITER_SHIFT 31
# Emits one 32-byte-aligned state handler for the jump table: sets r3 to
# the next state number (state 7 wraps back to 0) and rejoins the
# dispatch loop.
.macro state number
.balign 32
.Lstate_\number:
.if \number==7
li r3, 0
.else
li r3, \number+1
.endif
b .Lloop
.endm
# Repeatedly dispatch through an 8-entry jump table (2^ITER_SHIFT
# iterations) to exercise the branch pattern predictor.
# r3 = current state, r4 = remaining iterations.
FUNC_START(pattern_cache_loop)
li r3, 0
li r4, 1
sldi r4, r4, ITER_SHIFT
.Lloop: cmpdi r4, 0
beqlr
addi r4, r4, -1
# r6 = .Lstate_0 + jump_table[r3] (table holds 32-bit offsets)
ld r6, jump_table@got(%r2)
sldi r5, r3, 2
lwax r6, r5, r6
ld r7, .Lstate_0@got(%r2)
add r6, r6, r7
mtctr r6
bctr
state 0
state 1
state 2
state 3
state 4
state 5
state 6
state 7
FUNC_END(pattern_cache_loop)
# Spin 2^ITER_SHIFT times through a single indirect branch (bctr) whose
# target simply loops back; exercises the indirect branch predictor.
FUNC_START(indirect_branch_loop)
li r3, 1
sldi r3, r3, ITER_SHIFT
1: cmpdi r3, 0
beqlr
addi r3, r3, -1
ld r4, 2f@got(%r2)
mtctr r4
bctr
.balign 32
2: b 1b
FUNC_END(indirect_branch_loop)
|
aixcc-public/challenge-001-exemplar-source
| 2,818
|
tools/testing/selftests/powerpc/tm/tm-signal.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*/
#include "basic_asm.h"
#include "gpr_asm.h"
#include "fpu_asm.h"
#include "vmx_asm.h"
#include "vsx_asm.h"
/*
* Large caveat here being that the caller cannot expect the
* signal to always be sent! The hardware can (AND WILL!) abort
* the transaction between the tbegin and the tsuspend (however
* unlikely it seems or infrequently it actually happens).
* You have been warned.
*/
/* long tm_signal_self(pid_t pid, long *gprs, double *fps, vector *vms, vector *vss); */
FUNC_START(tm_signal_self_context_load)
# Loads the first half of each non-NULL register array outside a
# transaction, the second half inside it, then kill(pid, SIGUSR1) from
# suspended state so the signal dooms the transaction. Returns the pid
# on the doomed-transaction path, 0 if the aborted path ran instead.
PUSH_BASIC_STACK(512)
/*
 * Don't strictly need to save and restore as it depends on if
 * we're going to use them, however this reduces messy logic
 */
PUSH_VMX(STACK_FRAME_LOCAL(5,0),r8)
PUSH_FPU(512)
PUSH_NVREGS_BELOW_FPU(512)
std r3, STACK_FRAME_PARAM(0)(sp) /* pid */
std r4, STACK_FRAME_PARAM(1)(sp) /* gps */
std r5, STACK_FRAME_PARAM(2)(sp) /* fps */
std r6, STACK_FRAME_PARAM(3)(sp) /* vms */
std r7, STACK_FRAME_PARAM(4)(sp) /* vss */
# Pre-transactional loads: each helper is skipped for a NULL pointer.
ld r3, STACK_FRAME_PARAM(1)(sp)
cmpdi r3, 0
beq skip_gpr_lc
bl load_gpr
skip_gpr_lc:
ld r3, STACK_FRAME_PARAM(2)(sp)
cmpdi r3, 0
beq skip_fpu_lc
bl load_fpu
skip_fpu_lc:
ld r3, STACK_FRAME_PARAM(3)(sp)
cmpdi r3, 0
beq skip_vmx_lc
bl load_vmx
skip_vmx_lc:
ld r3, STACK_FRAME_PARAM(4)(sp)
cmpdi r3, 0
beq skip_vsx_lc
bl load_vsx
skip_vsx_lc:
/*
 * Set r3 (return value) before tbegin. Use the pid as a known
 * 'all good' return value, zero is used to indicate a non-doomed
 * transaction.
 */
ld r3, STACK_FRAME_PARAM(0)(sp)
tbegin.
beq 1f
tsuspend. /* Can't enter a syscall transactionally */
# Transactional loads: same helpers, second half of each array.
ld r3, STACK_FRAME_PARAM(1)(sp)
cmpdi r3, 0
beq skip_gpr_lt
/* Get the second half of the array */
addi r3, r3, 8 * 18
bl load_gpr
skip_gpr_lt:
ld r3, STACK_FRAME_PARAM(2)(sp)
cmpdi r3, 0
beq skip_fpu_lt
/* Get the second half of the array */
addi r3, r3, 8 * 18
bl load_fpu
skip_fpu_lt:
ld r3, STACK_FRAME_PARAM(3)(sp)
cmpdi r3, 0
beq skip_vmx_lt
/* Get the second half of the array */
addi r3, r3, 16 * 12
bl load_vmx
skip_vmx_lt:
ld r3, STACK_FRAME_PARAM(4)(sp)
cmpdi r3, 0
beq skip_vsx_lt
/* Get the second half of the array */
addi r3, r3, 16 * 12
bl load_vsx
skip_vsx_lt:
li r0, 37 /* sys_kill */
ld r3, STACK_FRAME_PARAM(0)(sp) /* pid */
li r4, 10 /* SIGUSR1 */
sc /* Taking the signal will doom the transaction */
tabort. 0
tresume. /* Be super sure we abort */
/*
 * This will cause us to resume doomed transaction and cause
 * hardware to cleanup, we'll end up at 1: anything between
 * tresume. and 1: shouldn't ever run.
 */
li r3, 0
1:
POP_VMX(STACK_FRAME_LOCAL(5,0),r4)
POP_FPU(512)
POP_NVREGS_BELOW_FPU(512)
POP_BASIC_STACK(512)
blr
FUNC_END(tm_signal_self_context_load)
|
aixcc-public/challenge-001-exemplar-source
| 1,359
|
tools/testing/selftests/powerpc/math/vsx_asm.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*/
#include "basic_asm.h"
#include "vsx_asm.h"
#long check_vsx(vector int *r3);
#This function wraps storeing VSX regs to the end of an array and a
#call to a comparison function in C which boils down to a memcmp()
FUNC_START(check_vsx)
# Store the live VSX regs into the second half of the caller's array,
# then let the C helper vsx_memcmp compare the halves; r3 = its result.
PUSH_BASIC_STACK(32)
std r3,STACK_FRAME_PARAM(0)(sp)
addi r3, r3, 16 * 12 #Second half of array
bl store_vsx
ld r3,STACK_FRAME_PARAM(0)(sp)
bl vsx_memcmp
POP_BASIC_STACK(32)
blr
FUNC_END(check_vsx)
# int preempt_vmx(vector int *varray, int *threads_starting,
# int *running);
# On starting will (atomically) decrement threads_starting as a signal
# that the VMX have been loaded with varray. Will proceed to check the
# validity of the VMX registers while running is not zero.
FUNC_START(preempt_vsx)
# Load VSX regs from varray, atomically decrement *threads_starting to
# signal readiness, then repeatedly re-verify the regs until *running
# becomes 0 or a check fails (nonzero r3 from check_vsx is returned).
PUSH_BASIC_STACK(512)
std r3,STACK_FRAME_PARAM(0)(sp) # vector int *varray
std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
std r5,STACK_FRAME_PARAM(2)(sp) # int *running
bl load_vsx
nop
sync
# Atomic DEC
ld r3,STACK_FRAME_PARAM(1)(sp)
1: lwarx r4,0,r3
addi r4,r4,-1
stwcx. r4,0,r3
bne- 1b
2: ld r3,STACK_FRAME_PARAM(0)(sp)
bl check_vsx
nop
cmpdi r3,0
bne 3f
ld r4,STACK_FRAME_PARAM(2)(sp)
ld r5,0(r4)
cmpwi r5,0
bne 2b
3: POP_BASIC_STACK(512)
blr
FUNC_END(preempt_vsx)
|
aixcc-public/challenge-001-exemplar-source
| 2,384
|
tools/testing/selftests/powerpc/math/fpu_asm.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*/
#include "basic_asm.h"
#include "fpu_asm.h"
FUNC_START(check_fpu)
# Compare f14-f31 against the 18 doubles at r3; returns r3 = 0 on
# match, 1 on first mismatch. Only f0 and r4 are used as scratch.
mr r4,r3
li r3,1 # assume a bad result
lfd f0,0(r4)
fcmpu cr1,f0,f14
bne cr1,1f
lfd f0,8(r4)
fcmpu cr1,f0,f15
bne cr1,1f
lfd f0,16(r4)
fcmpu cr1,f0,f16
bne cr1,1f
lfd f0,24(r4)
fcmpu cr1,f0,f17
bne cr1,1f
lfd f0,32(r4)
fcmpu cr1,f0,f18
bne cr1,1f
lfd f0,40(r4)
fcmpu cr1,f0,f19
bne cr1,1f
lfd f0,48(r4)
fcmpu cr1,f0,f20
bne cr1,1f
lfd f0,56(r4)
fcmpu cr1,f0,f21
bne cr1,1f
lfd f0,64(r4)
fcmpu cr1,f0,f22
bne cr1,1f
lfd f0,72(r4)
fcmpu cr1,f0,f23
bne cr1,1f
lfd f0,80(r4)
fcmpu cr1,f0,f24
bne cr1,1f
lfd f0,88(r4)
fcmpu cr1,f0,f25
bne cr1,1f
lfd f0,96(r4)
fcmpu cr1,f0,f26
bne cr1,1f
lfd f0,104(r4)
fcmpu cr1,f0,f27
bne cr1,1f
lfd f0,112(r4)
fcmpu cr1,f0,f28
bne cr1,1f
lfd f0,120(r4)
fcmpu cr1,f0,f29
bne cr1,1f
lfd f0,128(r4)
fcmpu cr1,f0,f30
bne cr1,1f
lfd f0,136(r4)
fcmpu cr1,f0,f31
bne cr1,1f
li r3,0 # Success!!!
1: blr
FUNC_START(test_fpu)
# r3 holds pointer to where to put the result of fork
# r4 holds pointer to the pid
# f14-f31 are non volatiles
# Loads f14-f31 from darray, forks (so both processes run the check),
# stores fork's return at *r4, and returns check_fpu's result in r3.
PUSH_BASIC_STACK(256)
PUSH_FPU(256)
std r3,STACK_FRAME_PARAM(0)(sp) # Address of darray
std r4,STACK_FRAME_PARAM(1)(sp) # Address of pid
bl load_fpu
nop
li r0,__NR_fork
sc
# pass the result of the fork to the caller
ld r9,STACK_FRAME_PARAM(1)(sp)
std r3,0(r9)
ld r3,STACK_FRAME_PARAM(0)(sp)
bl check_fpu
nop
POP_FPU(256)
POP_BASIC_STACK(256)
blr
FUNC_END(test_fpu)
# int preempt_fpu(double *darray, int *threads_running, int *running)
# On starting will (atomically) decrement not_ready as a signal that the FPU
# has been loaded with darray. Will proceed to check the validity of the FPU
# registers while running is not zero.
FUNC_START(preempt_fpu)
# Load f14-f31 from darray, atomically decrement *threads_starting,
# then keep re-checking the FPU regs until *running is 0 or a check
# fails; returns check_fpu's last result in r3.
PUSH_BASIC_STACK(256)
PUSH_FPU(256)
std r3,STACK_FRAME_PARAM(0)(sp) # double *darray
std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
std r5,STACK_FRAME_PARAM(2)(sp) # int *running
bl load_fpu
nop
sync
# Atomic DEC
ld r3,STACK_FRAME_PARAM(1)(sp)
1: lwarx r4,0,r3
addi r4,r4,-1
stwcx. r4,0,r3
bne- 1b
2: ld r3,STACK_FRAME_PARAM(0)(sp)
bl check_fpu
nop
cmpdi r3,0
bne 3f
ld r4,STACK_FRAME_PARAM(2)(sp)
ld r5,0(r4)
cmpwi r5,0
bne 2b
3: POP_FPU(256)
POP_BASIC_STACK(256)
blr
FUNC_END(preempt_fpu)
|
aixcc-public/challenge-001-exemplar-source
| 2,755
|
tools/testing/selftests/powerpc/math/vmx_asm.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*/
#include "basic_asm.h"
#include "vmx_asm.h"
# Should be safe from C, only touches r4, r5 and v0,v1,v2
FUNC_START(check_vmx)
# Compare v20-v31 against the 12 vectors at r3. v2 accumulates the
# AND of all per-element equality masks; it is all-ones only if every
# element of every register matched. Returns r3 = 0 on match, 1 on
# mismatch.
PUSH_BASIC_STACK(32)
mr r4,r3
li r3,1 # assume a bad result
li r5,0
lvx v0,r5,r4
vcmpequd. v1,v0,v20
vmr v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v21
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v22
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v23
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v24
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v25
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v26
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v27
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v28
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v29
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v30
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v31
vand v2,v2,v1
# Spill the accumulated mask and test one 64-bit lane of it.
li r5,STACK_FRAME_LOCAL(0,0)
stvx v2,r5,sp
ldx r0,r5,sp
cmpdi r0,0xffffffffffffffff
bne 1f
li r3,0
1: POP_BASIC_STACK(32)
blr
FUNC_END(check_vmx)
# Safe from C
FUNC_START(test_vmx)
# r3 holds pointer to where to put the result of fork
# r4 holds pointer to the pid
# v20-v31 are non-volatile
# Loads v20-v31 from varray, forks, stores fork's return at *r4, and
# returns check_vmx's result in r3.
PUSH_BASIC_STACK(512)
std r3,STACK_FRAME_PARAM(0)(sp) # Address of varray
std r4,STACK_FRAME_PARAM(1)(sp) # address of pid
PUSH_VMX(STACK_FRAME_LOCAL(2,0),r4)
bl load_vmx
nop
li r0,__NR_fork
sc
# Pass the result of fork back to the caller
ld r9,STACK_FRAME_PARAM(1)(sp)
std r3,0(r9)
ld r3,STACK_FRAME_PARAM(0)(sp)
bl check_vmx
nop
POP_VMX(STACK_FRAME_LOCAL(2,0),r4)
POP_BASIC_STACK(512)
blr
FUNC_END(test_vmx)
# int preempt_vmx(vector int *varray, int *threads_starting, int *running)
# On starting will (atomically) decrement threads_starting as a signal that
# the VMX have been loaded with varray. Will proceed to check the validity of
# the VMX registers while running is not zero.
FUNC_START(preempt_vmx)
# Load v20-v31 from varray, atomically decrement *threads_starting,
# then keep re-checking the VMX regs until *running is 0 or a check
# fails; returns check_vmx's last result in r3.
PUSH_BASIC_STACK(512)
std r3,STACK_FRAME_PARAM(0)(sp) # vector int *varray
std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
std r5,STACK_FRAME_PARAM(2)(sp) # int *running
# VMX need to write to 16 byte aligned addresses, skip STACK_FRAME_LOCAL(3,0)
PUSH_VMX(STACK_FRAME_LOCAL(4,0),r4)
bl load_vmx
nop
sync
# Atomic DEC
ld r3,STACK_FRAME_PARAM(1)(sp)
1: lwarx r4,0,r3
addi r4,r4,-1
stwcx. r4,0,r3
bne- 1b
2: ld r3,STACK_FRAME_PARAM(0)(sp)
bl check_vmx
nop
cmpdi r3,0
bne 3f
ld r4,STACK_FRAME_PARAM(2)(sp)
ld r5,0(r4)
cmpwi r5,0
bne 2b
3: POP_VMX(STACK_FRAME_LOCAL(4,0),r4)
POP_BASIC_STACK(512)
blr
FUNC_END(preempt_vmx)
|
aixcc-public/challenge-001-exemplar-source
| 4,281
|
tools/testing/selftests/powerpc/pmu/ebb/busy_loop.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <ppc-asm.h>
.text
FUNC_START(core_busy_loop)
/*
 * Busy-spin while verifying GPR integrity: saves the non-volatiles,
 * fills r3-r29 with known constants (also spilled below the stack
 * pointer), loops 100 times, then checks every register and every
 * spilled copy. Returns r3 = 0 if nothing was corrupted, 1 otherwise.
 */
stdu %r1, -168(%r1)
std r14, 160(%r1)
std r15, 152(%r1)
std r16, 144(%r1)
std r17, 136(%r1)
std r18, 128(%r1)
std r19, 120(%r1)
std r20, 112(%r1)
std r21, 104(%r1)
std r22, 96(%r1)
std r23, 88(%r1)
std r24, 80(%r1)
std r25, 72(%r1)
std r26, 64(%r1)
std r27, 56(%r1)
std r28, 48(%r1)
std r29, 40(%r1)
std r30, 32(%r1)
std r31, 24(%r1)
/* Seed registers with recognizable constants and spill each copy
 * at a negative offset below r1 for the later reload check. */
li r3, 0x3030
std r3, -96(%r1)
li r4, 0x4040
std r4, -104(%r1)
li r5, 0x5050
std r5, -112(%r1)
li r6, 0x6060
std r6, -120(%r1)
li r7, 0x7070
std r7, -128(%r1)
li r8, 0x0808
std r8, -136(%r1)
li r9, 0x0909
std r9, -144(%r1)
li r10, 0x1010
std r10, -152(%r1)
li r11, 0x1111
std r11, -160(%r1)
li r14, 0x1414
std r14, -168(%r1)
li r15, 0x1515
std r15, -176(%r1)
li r16, 0x1616
std r16, -184(%r1)
li r17, 0x1717
std r17, -192(%r1)
li r18, 0x1818
std r18, -200(%r1)
li r19, 0x1919
std r19, -208(%r1)
li r20, 0x2020
std r20, -216(%r1)
li r21, 0x2121
std r21, -224(%r1)
li r22, 0x2222
std r22, -232(%r1)
li r23, 0x2323
std r23, -240(%r1)
li r24, 0x2424
std r24, -248(%r1)
li r25, 0x2525
std r25, -256(%r1)
li r26, 0x2626
std r26, -264(%r1)
li r27, 0x2727
std r27, -272(%r1)
li r28, 0x2828
std r28, -280(%r1)
li r29, 0x2929
std r29, -288(%r1)
li r30, 0x3030
li r31, 0x3131
/* The busy loop itself: count r3 up to 100. */
li r3, 0
0: addi r3, r3, 1
cmpwi r3, 100
blt 0b
/* Return 1 (fail) unless we get through all the checks */
li r3, 1
/* Check none of our registers have been corrupted */
cmpwi r4, 0x4040
bne 1f
cmpwi r5, 0x5050
bne 1f
cmpwi r6, 0x6060
bne 1f
cmpwi r7, 0x7070
bne 1f
cmpwi r8, 0x0808
bne 1f
cmpwi r9, 0x0909
bne 1f
cmpwi r10, 0x1010
bne 1f
cmpwi r11, 0x1111
bne 1f
cmpwi r14, 0x1414
bne 1f
cmpwi r15, 0x1515
bne 1f
cmpwi r16, 0x1616
bne 1f
cmpwi r17, 0x1717
bne 1f
cmpwi r18, 0x1818
bne 1f
cmpwi r19, 0x1919
bne 1f
cmpwi r20, 0x2020
bne 1f
cmpwi r21, 0x2121
bne 1f
cmpwi r22, 0x2222
bne 1f
cmpwi r23, 0x2323
bne 1f
cmpwi r24, 0x2424
bne 1f
cmpwi r25, 0x2525
bne 1f
cmpwi r26, 0x2626
bne 1f
cmpwi r27, 0x2727
bne 1f
cmpwi r28, 0x2828
bne 1f
cmpwi r29, 0x2929
bne 1f
cmpwi r30, 0x3030
bne 1f
cmpwi r31, 0x3131
bne 1f
/* Load junk into all our registers before we reload them from the stack. */
li r3, 0xde
li r4, 0xad
li r5, 0xbe
li r6, 0xef
li r7, 0xde
li r8, 0xad
li r9, 0xbe
li r10, 0xef
li r11, 0xde
li r14, 0xad
li r15, 0xbe
li r16, 0xef
li r17, 0xde
li r18, 0xad
li r19, 0xbe
li r20, 0xef
li r21, 0xde
li r22, 0xad
li r23, 0xbe
li r24, 0xef
li r25, 0xde
li r26, 0xad
li r27, 0xbe
li r28, 0xef
li r29, 0xdd
/* Reload each spilled copy and verify it too. */
ld r3, -96(%r1)
cmpwi r3, 0x3030
bne 1f
ld r4, -104(%r1)
cmpwi r4, 0x4040
bne 1f
ld r5, -112(%r1)
cmpwi r5, 0x5050
bne 1f
ld r6, -120(%r1)
cmpwi r6, 0x6060
bne 1f
ld r7, -128(%r1)
cmpwi r7, 0x7070
bne 1f
ld r8, -136(%r1)
cmpwi r8, 0x0808
bne 1f
ld r9, -144(%r1)
cmpwi r9, 0x0909
bne 1f
ld r10, -152(%r1)
cmpwi r10, 0x1010
bne 1f
ld r11, -160(%r1)
cmpwi r11, 0x1111
bne 1f
ld r14, -168(%r1)
cmpwi r14, 0x1414
bne 1f
ld r15, -176(%r1)
cmpwi r15, 0x1515
bne 1f
ld r16, -184(%r1)
cmpwi r16, 0x1616
bne 1f
ld r17, -192(%r1)
cmpwi r17, 0x1717
bne 1f
ld r18, -200(%r1)
cmpwi r18, 0x1818
bne 1f
ld r19, -208(%r1)
cmpwi r19, 0x1919
bne 1f
ld r20, -216(%r1)
cmpwi r20, 0x2020
bne 1f
ld r21, -224(%r1)
cmpwi r21, 0x2121
bne 1f
ld r22, -232(%r1)
cmpwi r22, 0x2222
bne 1f
ld r23, -240(%r1)
cmpwi r23, 0x2323
bne 1f
ld r24, -248(%r1)
cmpwi r24, 0x2424
bne 1f
ld r25, -256(%r1)
cmpwi r25, 0x2525
bne 1f
ld r26, -264(%r1)
cmpwi r26, 0x2626
bne 1f
ld r27, -272(%r1)
cmpwi r27, 0x2727
bne 1f
ld r28, -280(%r1)
cmpwi r28, 0x2828
bne 1f
ld r29, -288(%r1)
cmpwi r29, 0x2929
bne 1f
/* Load 0 (success) to return */
li r3, 0
/* Common exit: restore the true non-volatiles and tear down the frame. */
1: ld r14, 160(%r1)
ld r15, 152(%r1)
ld r16, 144(%r1)
ld r17, 136(%r1)
ld r18, 128(%r1)
ld r19, 120(%r1)
ld r20, 112(%r1)
ld r21, 104(%r1)
ld r22, 96(%r1)
ld r23, 88(%r1)
ld r24, 80(%r1)
ld r25, 72(%r1)
ld r26, 64(%r1)
ld r27, 56(%r1)
ld r28, 48(%r1)
ld r29, 40(%r1)
ld r30, 32(%r1)
ld r31, 24(%r1)
addi %r1, %r1, 168
blr
|
aixcc-public/challenge-001-exemplar-source
| 7,495
|
tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <ppc-asm.h>
#include "reg.h"
/* ppc-asm.h defines most of the reg aliases, but not r1/r2. */
#define r1 1
#define r2 2
#define RFEBB .long 0x4c000924
/* Stack layout:
*
* ^
* User stack |
* Back chain ------+ <- r1 <-------+
* ... |
* Red zone / ABI Gap |
* ... |
* vr63 <+ |
* vr0 | |
* VSCR | |
* FSCR | |
* r31 | Save area |
* r0 | |
* XER | |
* CTR | |
* LR | |
* CCR <+ |
* ... <+ |
* LR | Caller frame |
* CCR | |
* Back chain <+ <- updated r1 --------+
*
*/
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define ABIGAP 512
#else
#define ABIGAP 288
#endif
#define NR_GPR 32
#define NR_SPR 6
#define NR_VSR 64
#define SAVE_AREA ((NR_GPR + NR_SPR) * 8 + (NR_VSR * 16))
#define CALLER_FRAME 112
#define STACK_FRAME (ABIGAP + SAVE_AREA + CALLER_FRAME)
#define CCR_SAVE (CALLER_FRAME)
#define LR_SAVE (CCR_SAVE + 8)
#define CTR_SAVE (LR_SAVE + 8)
#define XER_SAVE (CTR_SAVE + 8)
#define GPR_SAVE(n) (XER_SAVE + 8 + (8 * n))
#define FSCR_SAVE (GPR_SAVE(31) + 8)
#define VSCR_SAVE (FSCR_SAVE + 8)
#define VSR_SAVE(n) (VSCR_SAVE + 8 + (16 * n))
#define SAVE_GPR(n) std n,GPR_SAVE(n)(r1)
#define REST_GPR(n) ld n,GPR_SAVE(n)(r1)
#define TRASH_GPR(n) lis n,0xaaaa
#define SAVE_VSR(n, b) li b, VSR_SAVE(n); stxvd2x n,b,r1
#define LOAD_VSR(n, b) li b, VSR_SAVE(n); lxvd2x n,b,r1
#define LOAD_REG_IMMEDIATE(reg,expr) \
lis reg,(expr)@highest; \
ori reg,reg,(expr)@higher; \
rldicr reg,reg,32,31; \
oris reg,reg,(expr)@h; \
ori reg,reg,(expr)@l;
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define ENTRY_POINT(name) \
.type FUNC_NAME(name),@function; \
.globl FUNC_NAME(name); \
FUNC_NAME(name):
#define RESTORE_TOC(name) \
/* Restore our TOC pointer using our entry point */ \
LOAD_REG_IMMEDIATE(r12, name) \
0: addis r2,r12,(.TOC.-0b)@ha; \
addi r2,r2,(.TOC.-0b)@l;
#else
#define ENTRY_POINT(name) FUNC_START(name)
#define RESTORE_TOC(name) \
/* Restore our TOC pointer via our opd entry */ \
LOAD_REG_IMMEDIATE(r2, name) \
ld r2,8(r2);
#endif
.text
ENTRY_POINT(ebb_handler)
/*
 * EBB interrupt entry: save the full user-visible register state
 * (GPRs, CR/LR/CTR/XER, FPSCR, VSCR, all 64 VSRs) to a fresh stack
 * frame, trash the GPRs to catch restore bugs, restore the TOC, call
 * the C hook, restore everything, and return with RFEBB.
 */
stdu r1,-STACK_FRAME(r1)
SAVE_GPR(0)
mflr r0
std r0,LR_SAVE(r1)
mfcr r0
std r0,CCR_SAVE(r1)
mfctr r0
std r0,CTR_SAVE(r1)
mfxer r0
std r0,XER_SAVE(r1)
SAVE_GPR(2)
SAVE_GPR(3)
SAVE_GPR(4)
SAVE_GPR(5)
SAVE_GPR(6)
SAVE_GPR(7)
SAVE_GPR(8)
SAVE_GPR(9)
SAVE_GPR(10)
SAVE_GPR(11)
SAVE_GPR(12)
SAVE_GPR(13)
SAVE_GPR(14)
SAVE_GPR(15)
SAVE_GPR(16)
SAVE_GPR(17)
SAVE_GPR(18)
SAVE_GPR(19)
SAVE_GPR(20)
SAVE_GPR(21)
SAVE_GPR(22)
SAVE_GPR(23)
SAVE_GPR(24)
SAVE_GPR(25)
SAVE_GPR(26)
SAVE_GPR(27)
SAVE_GPR(28)
SAVE_GPR(29)
SAVE_GPR(30)
SAVE_GPR(31)
/* VSR0 must be saved before f0/v0 are used as scratch below. */
SAVE_VSR(0, r3)
/* mffs reads the FPSCR (despite the FSCR_SAVE slot name). */
mffs f0
stfd f0, FSCR_SAVE(r1)
mfvscr f0
stfd f0, VSCR_SAVE(r1)
SAVE_VSR(1, r3)
SAVE_VSR(2, r3)
SAVE_VSR(3, r3)
SAVE_VSR(4, r3)
SAVE_VSR(5, r3)
SAVE_VSR(6, r3)
SAVE_VSR(7, r3)
SAVE_VSR(8, r3)
SAVE_VSR(9, r3)
SAVE_VSR(10, r3)
SAVE_VSR(11, r3)
SAVE_VSR(12, r3)
SAVE_VSR(13, r3)
SAVE_VSR(14, r3)
SAVE_VSR(15, r3)
SAVE_VSR(16, r3)
SAVE_VSR(17, r3)
SAVE_VSR(18, r3)
SAVE_VSR(19, r3)
SAVE_VSR(20, r3)
SAVE_VSR(21, r3)
SAVE_VSR(22, r3)
SAVE_VSR(23, r3)
SAVE_VSR(24, r3)
SAVE_VSR(25, r3)
SAVE_VSR(26, r3)
SAVE_VSR(27, r3)
SAVE_VSR(28, r3)
SAVE_VSR(29, r3)
SAVE_VSR(30, r3)
SAVE_VSR(31, r3)
SAVE_VSR(32, r3)
SAVE_VSR(33, r3)
SAVE_VSR(34, r3)
SAVE_VSR(35, r3)
SAVE_VSR(36, r3)
SAVE_VSR(37, r3)
SAVE_VSR(38, r3)
SAVE_VSR(39, r3)
SAVE_VSR(40, r3)
SAVE_VSR(41, r3)
SAVE_VSR(42, r3)
SAVE_VSR(43, r3)
SAVE_VSR(44, r3)
SAVE_VSR(45, r3)
SAVE_VSR(46, r3)
SAVE_VSR(47, r3)
SAVE_VSR(48, r3)
SAVE_VSR(49, r3)
SAVE_VSR(50, r3)
SAVE_VSR(51, r3)
SAVE_VSR(52, r3)
SAVE_VSR(53, r3)
SAVE_VSR(54, r3)
SAVE_VSR(55, r3)
SAVE_VSR(56, r3)
SAVE_VSR(57, r3)
SAVE_VSR(58, r3)
SAVE_VSR(59, r3)
SAVE_VSR(60, r3)
SAVE_VSR(61, r3)
SAVE_VSR(62, r3)
SAVE_VSR(63, r3)
/* Deliberately trash the saved GPRs so a missing restore is caught. */
TRASH_GPR(2)
TRASH_GPR(3)
TRASH_GPR(4)
TRASH_GPR(5)
TRASH_GPR(6)
TRASH_GPR(7)
TRASH_GPR(8)
TRASH_GPR(9)
TRASH_GPR(10)
TRASH_GPR(11)
TRASH_GPR(12)
TRASH_GPR(14)
TRASH_GPR(15)
TRASH_GPR(16)
TRASH_GPR(17)
TRASH_GPR(18)
TRASH_GPR(19)
TRASH_GPR(20)
TRASH_GPR(21)
TRASH_GPR(22)
TRASH_GPR(23)
TRASH_GPR(24)
TRASH_GPR(25)
TRASH_GPR(26)
TRASH_GPR(27)
TRASH_GPR(28)
TRASH_GPR(29)
TRASH_GPR(30)
TRASH_GPR(31)
RESTORE_TOC(ebb_handler)
/*
 * r13 is our TLS pointer. We leave whatever value was in there when the
 * EBB fired. That seems to be OK because once set the TLS pointer is not
 * changed - but presumably that could change in future.
 */
bl ebb_hook
nop
/* r2 may be changed here but we don't care */
lfd f0, FSCR_SAVE(r1)
mtfsf 0xff,f0
lfd f0, VSCR_SAVE(r1)
mtvscr f0
LOAD_VSR(0, r3)
LOAD_VSR(1, r3)
LOAD_VSR(2, r3)
LOAD_VSR(3, r3)
LOAD_VSR(4, r3)
LOAD_VSR(5, r3)
LOAD_VSR(6, r3)
LOAD_VSR(7, r3)
LOAD_VSR(8, r3)
LOAD_VSR(9, r3)
LOAD_VSR(10, r3)
LOAD_VSR(11, r3)
LOAD_VSR(12, r3)
LOAD_VSR(13, r3)
LOAD_VSR(14, r3)
LOAD_VSR(15, r3)
LOAD_VSR(16, r3)
LOAD_VSR(17, r3)
LOAD_VSR(18, r3)
LOAD_VSR(19, r3)
LOAD_VSR(20, r3)
LOAD_VSR(21, r3)
LOAD_VSR(22, r3)
LOAD_VSR(23, r3)
LOAD_VSR(24, r3)
LOAD_VSR(25, r3)
LOAD_VSR(26, r3)
LOAD_VSR(27, r3)
LOAD_VSR(28, r3)
LOAD_VSR(29, r3)
LOAD_VSR(30, r3)
LOAD_VSR(31, r3)
LOAD_VSR(32, r3)
LOAD_VSR(33, r3)
LOAD_VSR(34, r3)
LOAD_VSR(35, r3)
LOAD_VSR(36, r3)
LOAD_VSR(37, r3)
LOAD_VSR(38, r3)
LOAD_VSR(39, r3)
LOAD_VSR(40, r3)
LOAD_VSR(41, r3)
LOAD_VSR(42, r3)
LOAD_VSR(43, r3)
LOAD_VSR(44, r3)
LOAD_VSR(45, r3)
LOAD_VSR(46, r3)
LOAD_VSR(47, r3)
LOAD_VSR(48, r3)
LOAD_VSR(49, r3)
LOAD_VSR(50, r3)
LOAD_VSR(51, r3)
LOAD_VSR(52, r3)
LOAD_VSR(53, r3)
LOAD_VSR(54, r3)
LOAD_VSR(55, r3)
LOAD_VSR(56, r3)
LOAD_VSR(57, r3)
LOAD_VSR(58, r3)
LOAD_VSR(59, r3)
LOAD_VSR(60, r3)
LOAD_VSR(61, r3)
LOAD_VSR(62, r3)
LOAD_VSR(63, r3)
/* Restore SPRs, then GPRs (r0 last-saved, restored first). */
ld r0,XER_SAVE(r1)
mtxer r0
ld r0,CTR_SAVE(r1)
mtctr r0
ld r0,LR_SAVE(r1)
mtlr r0
ld r0,CCR_SAVE(r1)
mtcr r0
REST_GPR(0)
REST_GPR(2)
REST_GPR(3)
REST_GPR(4)
REST_GPR(5)
REST_GPR(6)
REST_GPR(7)
REST_GPR(8)
REST_GPR(9)
REST_GPR(10)
REST_GPR(11)
REST_GPR(12)
REST_GPR(13)
REST_GPR(14)
REST_GPR(15)
REST_GPR(16)
REST_GPR(17)
REST_GPR(18)
REST_GPR(19)
REST_GPR(20)
REST_GPR(21)
REST_GPR(22)
REST_GPR(23)
REST_GPR(24)
REST_GPR(25)
REST_GPR(26)
REST_GPR(27)
REST_GPR(28)
REST_GPR(29)
REST_GPR(30)
REST_GPR(31)
addi r1,r1,STACK_FRAME
RFEBB
FUNC_END(ebb_handler)
|
aixcc-public/challenge-001-exemplar-source
| 3,553
|
tools/arch/x86/lib/memcpy_64.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2002 Andi Kleen */
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/export.h>
.pushsection .noinstr.text, "ax"
/*
* We build a jump to memcpy_orig by default which gets NOPped out on
* the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
* have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
* to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
*/
/*
* memcpy - Copy a memory block.
*
* Input:
* rdi destination
* rsi source
* rdx count
*
* Output:
* rax original destination
*/
SYM_TYPED_FUNC_START(__memcpy)
/*
 * Default REP_GOOD path: copy rdx/8 qwords with rep movsq, then the
 * remaining rdx%8 bytes with rep movsb. The ALTERNATIVE_2 patches in
 * a jump to memcpy_orig (no REP_GOOD) or memcpy_erms (ERMS) at boot.
 */
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
movq %rdi, %rax
movq %rdx, %rcx
shrq $3, %rcx
andl $7, %edx
rep movsq
movl %edx, %ecx
rep movsb
RET
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)
/*
* memcpy_erms() - enhanced fast string memcpy. This is faster and
* simpler than memcpy. Use memcpy_erms when possible.
*/
SYM_FUNC_START_LOCAL(memcpy_erms)
/* ERMS path: a single rep movsb copies all rdx bytes. */
movq %rdi, %rax
movq %rdx, %rcx
rep movsb
RET
SYM_FUNC_END(memcpy_erms)
SYM_FUNC_START_LOCAL(memcpy_orig)
/*
 * Fallback for CPUs without fast string ops: 32-byte unrolled loops
 * (forward or backward depending on an address-overlap heuristic),
 * then overlapping-load tail handling down to single bytes.
 */
movq %rdi, %rax
cmpq $0x20, %rdx
jb .Lhandle_tail
/*
 * We check whether memory false dependence could occur,
 * then jump to corresponding copy mode.
 */
cmp %dil, %sil
jl .Lcopy_backward
subq $0x20, %rdx
.Lcopy_forward_loop:
subq $0x20, %rdx
/*
 * Move in blocks of 4x8 bytes:
 */
movq 0*8(%rsi), %r8
movq 1*8(%rsi), %r9
movq 2*8(%rsi), %r10
movq 3*8(%rsi), %r11
leaq 4*8(%rsi), %rsi
movq %r8, 0*8(%rdi)
movq %r9, 1*8(%rdi)
movq %r10, 2*8(%rdi)
movq %r11, 3*8(%rdi)
leaq 4*8(%rdi), %rdi
jae .Lcopy_forward_loop
addl $0x20, %edx
jmp .Lhandle_tail
.Lcopy_backward:
/*
 * Calculate copy position to tail.
 */
addq %rdx, %rsi
addq %rdx, %rdi
subq $0x20, %rdx
/*
 * At most 3 ALU operations in one cycle,
 * so append NOPS in the same 16 bytes trunk.
 */
.p2align 4
.Lcopy_backward_loop:
subq $0x20, %rdx
movq -1*8(%rsi), %r8
movq -2*8(%rsi), %r9
movq -3*8(%rsi), %r10
movq -4*8(%rsi), %r11
leaq -4*8(%rsi), %rsi
movq %r8, -1*8(%rdi)
movq %r9, -2*8(%rdi)
movq %r10, -3*8(%rdi)
movq %r11, -4*8(%rdi)
leaq -4*8(%rdi), %rdi
jae .Lcopy_backward_loop
/*
 * Calculate copy position to head.
 */
addl $0x20, %edx
subq %rdx, %rsi
subq %rdx, %rdi
.Lhandle_tail:
cmpl $16, %edx
jb .Lless_16bytes
/*
 * Move data from 16 bytes to 31 bytes.
 * Two qwords from each end; they may overlap in the middle.
 */
movq 0*8(%rsi), %r8
movq 1*8(%rsi), %r9
movq -2*8(%rsi, %rdx), %r10
movq -1*8(%rsi, %rdx), %r11
movq %r8, 0*8(%rdi)
movq %r9, 1*8(%rdi)
movq %r10, -2*8(%rdi, %rdx)
movq %r11, -1*8(%rdi, %rdx)
RET
.p2align 4
.Lless_16bytes:
cmpl $8, %edx
jb .Lless_8bytes
/*
 * Move data from 8 bytes to 15 bytes.
 */
movq 0*8(%rsi), %r8
movq -1*8(%rsi, %rdx), %r9
movq %r8, 0*8(%rdi)
movq %r9, -1*8(%rdi, %rdx)
RET
.p2align 4
.Lless_8bytes:
cmpl $4, %edx
jb .Lless_3bytes
/*
 * Move data from 4 bytes to 7 bytes.
 */
movl (%rsi), %ecx
movl -4(%rsi, %rdx), %r8d
movl %ecx, (%rdi)
movl %r8d, -4(%rdi, %rdx)
RET
.p2align 4
.Lless_3bytes:
subl $1, %edx
jb .Lend
/*
 * Move data from 1 bytes to 3 bytes.
 * Note: movzbl does not modify flags, so the jz below still tests
 * the result of the subl above (edx was 1, i.e. a single byte).
 */
movzbl (%rsi), %ecx
jz .Lstore_1byte
movzbq 1(%rsi), %r8
movzbq (%rsi, %rdx), %r9
movb %r8b, 1(%rdi)
movb %r9b, (%rdi, %rdx)
.Lstore_1byte:
movb %cl, (%rdi)
.Lend:
RET
SYM_FUNC_END(memcpy_orig)
.popsection
|
aixcc-public/challenge-001-exemplar-source
| 2,817
|
tools/arch/x86/lib/memset_64.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2002 Andi Kleen, SuSE Labs */
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/export.h>
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
* string to get better performance than the original function. The code is
* simpler and shorter than the original function as well.
*
* rdi destination
* rsi value (char)
* rdx count (bytes)
*
* rax original destination
*/
SYM_FUNC_START(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
 * to use it when possible. If not available, use fast string instructions.
 *
 * Otherwise, use original memset function.
 */
ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memset_erms", X86_FEATURE_ERMS
/* REP_GOOD path: rdx/8 qword stores, then the rdx%8 byte remainder. */
movq %rdi,%r9
movq %rdx,%rcx
andl $7,%edx
shrq $3,%rcx
/* expand byte value */
movzbl %sil,%esi
movabs $0x0101010101010101,%rax
imulq %rsi,%rax
rep stosq
movl %edx,%ecx
rep stosb
movq %r9,%rax
RET
SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
SYM_FUNC_ALIAS_WEAK(memset, __memset)
EXPORT_SYMBOL(memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses
* enhanced rep stosb to override the fast string function.
* The code is simpler and shorter than the fast string function as well.
*
* rdi destination
* rsi value (char)
* rdx count (bytes)
*
* rax original destination
*/
SYM_FUNC_START_LOCAL(memset_erms)
/* ERMS path: al already holds the fill byte for rep stosb. */
movq %rdi,%r9
movb %sil,%al
movq %rdx,%rcx
rep stosb
movq %r9,%rax
RET
SYM_FUNC_END(memset_erms)
SYM_FUNC_START_LOCAL(memset_orig)
/*
 * Fallback for CPUs without fast string ops: align the destination to
 * 8 bytes, store in 64-byte unrolled blocks, then 8-byte and 1-byte
 * tail loops.
 */
movq %rdi,%r10
/* expand byte value */
movzbl %sil,%ecx
movabs $0x0101010101010101,%rax
imulq %rcx,%rax
/* align dst */
movl %edi,%r9d
andl $7,%r9d
jnz .Lbad_alignment
.Lafter_bad_alignment:
movq %rdx,%rcx
shrq $6,%rcx
jz .Lhandle_tail
.p2align 4
.Lloop_64:
decq %rcx
movq %rax,(%rdi)
movq %rax,8(%rdi)
movq %rax,16(%rdi)
movq %rax,24(%rdi)
movq %rax,32(%rdi)
movq %rax,40(%rdi)
movq %rax,48(%rdi)
movq %rax,56(%rdi)
leaq 64(%rdi),%rdi
jnz .Lloop_64
/* Handle tail in loops. The loops should be faster than hard
to predict jump tables. */
.p2align 4
.Lhandle_tail:
movl %edx,%ecx
andl $63&(~7),%ecx
jz .Lhandle_7
shrl $3,%ecx
.p2align 4
.Lloop_8:
decl %ecx
movq %rax,(%rdi)
leaq 8(%rdi),%rdi
jnz .Lloop_8
.Lhandle_7:
andl $7,%edx
jz .Lende
.p2align 4
.Lloop_1:
decl %edx
movb %al,(%rdi)
leaq 1(%rdi),%rdi
jnz .Lloop_1
.Lende:
movq %r10,%rax
RET
.Lbad_alignment:
/* Unaligned head: store 8 bytes, then advance past the misalignment. */
cmpq $7,%rdx
jbe .Lhandle_7
movq %rax,(%rdi) /* unaligned store */
movq $8,%r8
subq %r9,%r8
addq %r8,%rdi
subq %r8,%rdx
jmp .Lafter_bad_alignment
.Lfinal:
SYM_FUNC_END(memset_orig)
|
aixcc-public/challenge-001-exemplar-source
| 4,109
|
arch/nios2/kernel/head.S
|
/*
* Copyright (C) 2009 Wind River Systems Inc
* Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
* Copyright (C) 2004 Microtronix Datacom Ltd
* Copyright (C) 2001 Vic Phillips, Microtronix Datacom Ltd.
*
* Based on head.S for Altera's Excalibur development board with nios processor
*
* Based on the following from the Excalibur sdk distribution:
* NA_MemoryMap.s, NR_JumpToStart.s, NR_Setup.s, NR_CWPManager.s
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-macros.h>
/*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
.data
.global empty_zero_page
/* page alignment: .align here takes a power of two (2^12 = 4096) —
 * TODO confirm against this target's .align semantics */
.align 12
empty_zero_page:
.space PAGE_SIZE
/*
 * This global variable is used as an extension to the nios'
 * STATUS register to emulate a user/supervisor mode.
 */
.data
.align 2
.set noat
.global _current_thread
/* Holds the current thread_info pointer; written by _start and resume */
_current_thread:
.long 0
/*
* Input(s): passed from u-boot
* r4 - Optional pointer to a board information structure.
* r5 - Optional pointer to the physical starting address of the init RAM
* disk.
* r6 - Optional pointer to the physical ending address of the init RAM
* disk.
* r7 - Optional pointer to the physical starting address of any kernel
* command-line parameters.
*/
/*
* First executable code - detected and jumped to by the ROM bootstrap
* if the code resides in flash (looks for "Nios" at offset 0x0c from
* the potential executable image).
*/
__HEAD
/*
 * Kernel entry point.  Disables interrupts, initializes the I- and
 * D-caches, relocates the image to its linked address if necessary,
 * clears .bss, sets up the initial kernel stack, then calls
 * nios2_boot_init and start_kernel.  The exception-handler stubs
 * below are placed in the middle of this path and skipped via `br 1f`.
 */
ENTRY(_start)
wrctl status, r0 /* Disable interrupts */
/* Initialize all cache lines within the instruction cache */
movia r1, NIOS2_ICACHE_SIZE
movui r2, NIOS2_ICACHE_LINE_SIZE
/* walk from the top of the cache down, one line per iteration */
icache_init:
initi r1
sub r1, r1, r2
bgt r1, r0, icache_init
br 1f /* skip over the exception-handler stubs below */
/*
 * This is the default location for the exception handler.  The code
 * here simply jumps to the real handler (inthandler).
 */
ENTRY(exception_handler_hook)
movia r24, inthandler
jmp r24
/*
 * Fast TLB-miss handler: refills the TLB from the current page
 * directory using only et and r3 (r3 is spilled to r3save below, so
 * no exception frame is needed).
 */
ENTRY(fast_handler)
nextpc et
helper:
stw r3, r3save - helper(et) /* spill r3, PC-relative via et */
rdctl r3 , pteaddr
srli r3, r3, 12 /* derive the pgd index from pteaddr */
slli r3, r3, 2 /* scale to a word offset */
movia et, pgd_current
ldw et, 0(et) /* et = current page directory */
add r3, et, r3
ldw et, 0(r3) /* et = page-table pointer */
rdctl r3, pteaddr
andi r3, r3, 0xfff /* pte offset within the page table */
add et, r3, et
ldw et, 0(et) /* et = pte */
wrctl tlbacc, et /* write the pte into the TLB */
nextpc et
helper2:
ldw r3, r3save - helper2(et) /* restore spilled r3 */
subi ea, ea, 4 /* replay the faulting instruction on eret */
eret
r3save:
.word 0x0
ENTRY(fast_handler_end)
1:
/*
 * After the instruction cache is initialized, the data cache must
 * also be initialized.
 */
movia r1, NIOS2_DCACHE_SIZE
movui r2, NIOS2_DCACHE_LINE_SIZE
dcache_init:
initd 0(r1)
sub r1, r1, r2
bgt r1, r0, dcache_init
nextpc r1 /* Find out where we are */
chkadr:
movia r2, chkadr
beq r1, r2,finish_move /* We are running in RAM done */
addi r1, r1,(_start - chkadr) /* Source */
movia r2, _start /* Destination */
movia r3, __bss_start /* End of copy */
loop_move: /* r1: src, r2: dest, r3: last dest */
ldw r8, 0(r1) /* load a word from [r1] */
stw r8, 0(r2) /* store a word to dest [r2] */
flushd 0(r2) /* Flush cache for safety */
addi r1, r1, 4 /* inc the src addr */
addi r2, r2, 4 /* inc the dest addr */
blt r2, r3, loop_move
movia r1, finish_move /* VMA(_start)->l1 */
jmp r1 /* jmp to _start */
finish_move:
/* Mask off all possible interrupts */
wrctl ienable, r0
/* Clear .bss one byte at a time */
movia r2, __bss_start
movia r1, __bss_stop
1:
stb r0, 0(r2)
addi r2, r2, 1
bne r1, r2, 1b
movia r1, init_thread_union /* set stack at top of the task union */
addi sp, r1, THREAD_SIZE
movia r2, _current_thread /* Remember current thread */
stw r1, 0(r2)
movia r1, nios2_boot_init /* save args r4-r7 passed from u-boot */
callr r1
movia r1, start_kernel /* call start_kernel as a subroutine */
callr r1
/* If we return from start_kernel, break to the oci debugger and
* buggered we are.
*/
break
/* End of startup code */
.set at
|
aixcc-public/challenge-001-exemplar-source
| 14,987
|
arch/nios2/kernel/entry.S
|
/*
* linux/arch/nios2/kernel/entry.S
*
* Copyright (C) 2013-2014 Altera Corporation
* Copyright (C) 2009, Wind River Systems Inc
*
* Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
*
* Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
* Kenneth Albanowski <kjahds@kjahds.com>,
* Copyright (C) 2000 Lineo Inc. (www.lineo.com)
* Copyright (C) 2004 Microtronix Datacom Ltd.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
* ColdFire support by Greg Ungerer (gerg@snapgear.com)
* 5307 fixes by David W. Miller
* linux 2.4 support David McCullough <davidm@snapgear.com>
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asm-macros.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/entry.h>
#include <asm/unistd.h>
#include <asm/processor.h>
/*
 * GET_THREAD_INFO reg - compute the current thread_info pointer by
 * masking the kernel stack pointer down to a THREAD_SIZE boundary
 * (thread_info lives at the base of the kernel stack).
 */
.macro GET_THREAD_INFO reg
.if THREAD_SIZE & 0xffff0000
/* mask fits entirely in the high half-word */
andhi \reg, sp, %hi(~(THREAD_SIZE-1))
.else
/* mask fits in the low half-word: materialize it, then AND with sp */
addi \reg, r0, %lo(~(THREAD_SIZE-1))
and \reg, \reg, sp
.endif
.endm
/*
 * kuser_cmpxchg_check - restart the user-space cmpxchg helper if the
 * exception interrupted it inside its critical region.
 */
.macro kuser_cmpxchg_check
/*
* Make sure our user space atomic helper is restarted if it was
* interrupted in a critical region.
* ea-4 = address of interrupted insn (ea must be preserved).
* sp = saved regs.
* cmpxchg_ldw = first critical insn, cmpxchg_stw = last critical insn.
* If ea <= cmpxchg_stw and ea > cmpxchg_ldw then saved EA is set to
* cmpxchg_ldw + 4.
*/
/* et = cmpxchg_stw + 4 */
movui et, (KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start))
bgtu ea, et, 1f
subi et, et, (cmpxchg_stw - cmpxchg_ldw) /* et = cmpxchg_ldw + 4 */
bltu ea, et, 1f
stw et, PT_EA(sp) /* fix up EA */
mov ea, et
1:
.endm
.section .rodata
.align 4
/*
 * Dispatch table indexed by the hardware exception cause;
 * inthandler jumps through this table.
 */
exception_table:
.word unhandled_exception /* 0 - Reset */
.word unhandled_exception /* 1 - Processor-only Reset */
.word external_interrupt /* 2 - Interrupt */
.word handle_trap /* 3 - Trap Instruction */
.word instruction_trap /* 4 - Unimplemented instruction */
.word handle_illegal /* 5 - Illegal instruction */
.word handle_unaligned /* 6 - Misaligned data access */
.word handle_unaligned /* 7 - Misaligned destination address */
.word handle_diverror /* 8 - Division error */
.word protection_exception_ba /* 9 - Supervisor-only instr. address */
.word protection_exception_instr /* 10 - Supervisor only instruction */
.word protection_exception_ba /* 11 - Supervisor only data address */
.word unhandled_exception /* 12 - Double TLB miss (data) */
.word protection_exception_pte /* 13 - TLB permission violation (x) */
.word protection_exception_pte /* 14 - TLB permission violation (r) */
.word protection_exception_pte /* 15 - TLB permission violation (w) */
.word unhandled_exception /* 16 - MPU region violation */
/*
 * Dispatch table indexed by the trap instruction's IMM5 field (0-31);
 * handle_trap jumps through this table.  Trap 0 is the system call.
 */
trap_table:
.word handle_system_call /* 0 */
.word handle_trap_1 /* 1 */
.word handle_trap_2 /* 2 */
.word handle_trap_3 /* 3 */
.word handle_trap_reserved /* 4 */
.word handle_trap_reserved /* 5 */
.word handle_trap_reserved /* 6 */
.word handle_trap_reserved /* 7 */
.word handle_trap_reserved /* 8 */
.word handle_trap_reserved /* 9 */
.word handle_trap_reserved /* 10 */
.word handle_trap_reserved /* 11 */
.word handle_trap_reserved /* 12 */
.word handle_trap_reserved /* 13 */
.word handle_trap_reserved /* 14 */
.word handle_trap_reserved /* 15 */
.word handle_trap_reserved /* 16 */
.word handle_trap_reserved /* 17 */
.word handle_trap_reserved /* 18 */
.word handle_trap_reserved /* 19 */
.word handle_trap_reserved /* 20 */
.word handle_trap_reserved /* 21 */
.word handle_trap_reserved /* 22 */
.word handle_trap_reserved /* 23 */
.word handle_trap_reserved /* 24 */
.word handle_trap_reserved /* 25 */
.word handle_trap_reserved /* 26 */
.word handle_trap_reserved /* 27 */
.word handle_trap_reserved /* 28 */
.word handle_trap_reserved /* 29 */
#ifdef CONFIG_KGDB
.word handle_kgdb_breakpoint /* 30 KGDB breakpoint */
#else
.word instruction_trap /* 30 */
#endif
.word handle_breakpoint /* 31 */
.text
.set noat
.set nobreak
/*
 * Common exception entry: save the full register frame, clear
 * STATUS.EH, then dispatch through exception_table on the hardware
 * exception cause.
 */
ENTRY(inthandler)
SAVE_ALL
kuser_cmpxchg_check
/* Clear EH bit before we get a new exception in the kernel
* and after we have saved it to the exception frame. This is done
* whether it's trap, tlb-miss or interrupt. If we don't do this
* estatus is not updated for the next exception.
*/
rdctl r24, status
movi r9, %lo(~STATUS_EH)
and r24, r24, r9
wrctl status, r24
/* Read cause and vector and branch to the associated handler */
mov r4, sp /* r4 = pt_regs pointer for the handler */
rdctl r5, exception
movia r9, exception_table
/* the exception register holds cause << 2, so it indexes the
 * word table directly without further scaling */
add r24, r9, r5
ldw r24, 0(r24)
jmp r24
/***********************************************************************
* Handle traps
***********************************************************************
*/
/*
 * Trap-instruction dispatch: decode the trap number from the faulting
 * instruction and jump through trap_table.
 */
ENTRY(handle_trap)
ldwio r24, -4(ea) /* instruction that caused the exception */
/* (insn >> 4) & 0x7c extracts the IMM5 trap number pre-scaled by 4,
 * giving a ready-made word offset into trap_table */
srli r24, r24, 4
andi r24, r24, 0x7c
movia r9,trap_table
add r24, r24, r9
ldw r24, 0(r24)
jmp r24
/***********************************************************************
* Handle system calls
***********************************************************************
*/
/*
 * System-call entry (trap 0).  r2 = syscall number, args in r4-r9.
 * Validates the number, optionally detours through the ptrace path,
 * invokes the handler, then converts the C return value into the
 * (r2 = value/errno, r7 = error flag) user-visible convention.
 */
ENTRY(handle_system_call)
/* Enable interrupts */
rdctl r10, status
ori r10, r10, STATUS_PIE
wrctl status, r10
/* Reload registers destroyed by common code. */
ldw r4, PT_R4(sp)
ldw r5, PT_R5(sp)
local_restart:
stw r2, PT_ORIG_R2(sp) /* remember the syscall number for restart/trace */
/* Check that the requested system call is within limits */
movui r1, __NR_syscalls
bgeu r2, r1, ret_invsyscall
slli r1, r2, 2
movhi r11, %hiadj(sys_call_table)
add r1, r1, r11
ldw r1, %lo(sys_call_table)(r1) /* r1 = handler from sys_call_table */
/* Check if we are being traced */
GET_THREAD_INFO r11
ldw r11,TI_FLAGS(r11)
BTBNZ r11,r11,TIF_SYSCALL_TRACE,traced_system_call
/* Execute the system call */
callr r1
/* If the syscall returns a negative result:
* Set r7 to 1 to indicate error,
* Negate r2 to get a positive error code
* If the syscall returns zero or a positive value:
* Set r7 to 0.
* The sigreturn system calls will skip the code below by
* adding to register ra. To avoid destroying registers
*/
translate_rc_and_ret:
movi r1, 0
bge r2, zero, 3f
/* negative result: leave it untranslated when ORIG_R2 == -1
 * (i.e. the frame was not created by a syscall) */
ldw r1, PT_ORIG_R2(sp)
addi r1, r1, 1
beq r1, zero, 3f
sub r2, zero, r2
movi r1, 1
3:
stw r2, PT_R2(sp)
stw r1, PT_R7(sp)
end_translate_rc_and_ret:
ret_from_exception:
ldw r1, PT_ESTATUS(sp)
/* if so, skip resched, signals */
TSTBNZ r1, r1, ESTATUS_EU, Luser_return
restore_all:
rdctl r10, status /* disable intrs */
andi r10, r10, %lo(~STATUS_PIE)
wrctl status, r10
RESTORE_ALL
eret
/* If the syscall number was invalid return ENOSYS */
ret_invsyscall:
movi r2, -ENOSYS
br translate_rc_and_ret
/* This implements the same as above, except it calls
* do_syscall_trace_enter and do_syscall_trace_exit before and after the
* syscall in order for utilities like strace and gdb to work.
*/
/*
 * Traced system-call path: identical to the normal path but brackets
 * the syscall with do_syscall_trace_enter/exit for strace/gdb.
 */
traced_system_call:
SAVE_SWITCH_STACK
call do_syscall_trace_enter
RESTORE_SWITCH_STACK
/* Create system call register arguments. The 5th and 6th
arguments on stack are already in place at the beginning
of pt_regs. */
ldw r2, PT_R2(sp) /* reload r2: the tracer may have rewritten it */
ldw r4, PT_R4(sp)
ldw r5, PT_R5(sp)
ldw r6, PT_R6(sp)
ldw r7, PT_R7(sp)
/* Fetch the syscall function. */
movui r1, __NR_syscalls
bgeu r2, r1, traced_invsyscall
slli r1, r2, 2
movhi r11,%hiadj(sys_call_table)
add r1, r1, r11
ldw r1, %lo(sys_call_table)(r1)
callr r1
/* If the syscall returns a negative result:
* Set r7 to 1 to indicate error,
* Negate r2 to get a positive error code
* If the syscall returns zero or a positive value:
* Set r7 to 0.
* The sigreturn system calls will skip the code below by
* adding to register ra. To avoid destroying registers
*/
translate_rc_and_ret2:
movi r1, 0
bge r2, zero, 4f
/* as above: skip translation when ORIG_R2 == -1 */
ldw r1, PT_ORIG_R2(sp)
addi r1, r1, 1
beq r1, zero, 4f
sub r2, zero, r2
movi r1, 1
4:
stw r2, PT_R2(sp)
stw r1, PT_R7(sp)
end_translate_rc_and_ret2:
SAVE_SWITCH_STACK
call do_syscall_trace_exit
RESTORE_SWITCH_STACK
br ret_from_exception
/* If the syscall number was invalid return ENOSYS */
traced_invsyscall:
movi r2, -ENOSYS
br translate_rc_and_ret2
/*
 * Return-to-user work loop: handle rescheduling and pending
 * signals/notifications before dropping back to user mode.
 */
Luser_return:
GET_THREAD_INFO r11 /* get thread_info pointer */
ldw r10, TI_FLAGS(r11) /* get thread_info->flags */
ANDI32 r11, r10, _TIF_WORK_MASK
beq r11, r0, restore_all /* Nothing to do */
BTBZ r1, r10, TIF_NEED_RESCHED, Lsignal_return
/* Reschedule work */
call schedule
br ret_from_exception
Lsignal_return:
ANDI32 r1, r10, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
beq r1, r0, restore_all
mov r4, sp /* pt_regs */
SAVE_SWITCH_STACK
call do_notify_resume
/* nonzero return from do_notify_resume means the interrupted
 * syscall should be restarted */
beq r2, r0, no_work_pending
RESTORE_SWITCH_STACK
/* prepare restart syscall here without leaving kernel */
ldw r2, PT_R2(sp) /* reload syscall number in r2 */
ldw r4, PT_R4(sp) /* reload syscall arguments r4-r9 */
ldw r5, PT_R5(sp)
ldw r6, PT_R6(sp)
ldw r7, PT_R7(sp)
ldw r8, PT_R8(sp)
ldw r9, PT_R9(sp)
br local_restart /* restart syscall */
no_work_pending:
RESTORE_SWITCH_STACK
br ret_from_exception
/***********************************************************************
* Handle external interrupts.
***********************************************************************
*/
/*
* This is the generic interrupt handler (for all hardware interrupt
* sources). It figures out the vector number and calls the appropriate
* interrupt service routine directly.
*/
/*
 * Generic hardware-interrupt handler: scans the pending-and-enabled
 * mask, calls do_IRQ for each asserted line (lowest bit first), and
 * repeats until no enabled interrupt remains pending.
 */
external_interrupt:
rdctl r12, ipending
rdctl r9, ienable
and r12, r12, r9 /* r12 = pending AND enabled */
/* skip if no interrupt is pending */
beq r12, r0, ret_from_interrupt
/*
* Process an external hardware interrupt.
*/
addi ea, ea, -4 /* re-issue the interrupted instruction */
stw ea, PT_EA(sp)
2: movi r4, %lo(-1) /* Start from bit position 0,
highest priority */
/* This is the IRQ # for handler call */
1: andi r10, r12, 1 /* Isolate bit we are interested in */
srli r12, r12, 1 /* shift count is costly without hardware
multiplier */
addi r4, r4, 1 /* r4 counts up to the first set bit = IRQ number */
beq r10, r0, 1b
mov r5, sp /* Setup pt_regs pointer for handler call */
call do_IRQ
rdctl r12, ipending /* check again if irq still pending */
rdctl r9, ienable /* Isolate possible interrupts */
and r12, r12, r9
bne r12, r0, 2b
/* br ret_from_interrupt */ /* fall through to ret_from_interrupt */
ENTRY(ret_from_interrupt)
ldw r1, PT_ESTATUS(sp) /* check if returning to kernel */
TSTBNZ r1, r1, ESTATUS_EU, Luser_return
#ifdef CONFIG_PREEMPTION
/* kernel preemption: only if preempt_count == 0, NEED_RESCHED is
 * set and the interrupted context had interrupts enabled */
GET_THREAD_INFO r1
ldw r4, TI_PREEMPT_COUNT(r1)
bne r4, r0, restore_all
ldw r4, TI_FLAGS(r1) /* ? Need resched set */
BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
andi r10, r4, ESTATUS_EPIE
beq r10, r0, restore_all
call preempt_schedule_irq
#endif
br restore_all
/***********************************************************************
* A few syscall wrappers
***********************************************************************
*/
/*
* int clone(unsigned long clone_flags, unsigned long newsp,
* int __user * parent_tidptr, int __user * child_tidptr,
* int tls_val)
*/
/*
 * clone() wrapper: nios2_clone takes five arguments, so the tls
 * pointer (r8) is passed as the 5th argument on the stack.
 */
ENTRY(sys_clone)
SAVE_SWITCH_STACK
subi sp, sp, 4 /* make space for tls pointer */
stw r8, 0(sp) /* pass tls pointer (r8) via stack (5th argument) */
call nios2_clone
addi sp, sp, 4
RESTORE_SWITCH_STACK
ret
/*
 * rt_sigreturn wrapper.  The ra adjustment makes the return skip the
 * translate_rc_and_ret block in handle_system_call, so the r2/r7
 * values restored from the signal frame are not clobbered.
 */
ENTRY(sys_rt_sigreturn)
SAVE_SWITCH_STACK
mov r4, sp /* pt_regs argument */
call do_rt_sigreturn
RESTORE_SWITCH_STACK
addi ra, ra, (end_translate_rc_and_ret - translate_rc_and_ret)
ret
/***********************************************************************
* A few other wrappers and stubs
***********************************************************************
*/
/* TLB permission violation: recover the faulting virtual address
 * from pteaddr's VPN field (hence the shift) before do_page_fault.
 * r4 (pt_regs) and r5 (cause) were set up by inthandler. */
protection_exception_pte:
rdctl r6, pteaddr
slli r6, r6, 10
call do_page_fault
br ret_from_exception
/* Supervisor-only address fault: badaddr holds the offending address */
protection_exception_ba:
rdctl r6, badaddr
call do_page_fault
br ret_from_exception
/* Supervisor-only instruction executed in user mode */
protection_exception_instr:
call handle_supervisor_instr
br ret_from_exception
handle_breakpoint:
call breakpoint_c
br ret_from_exception
#ifdef CONFIG_NIOS2_ALIGNMENT_TRAP
/* misaligned access: the emulation path needs the switch-stack
 * registers saved so it can read/modify any register */
handle_unaligned:
SAVE_SWITCH_STACK
call handle_unaligned_c
RESTORE_SWITCH_STACK
br ret_from_exception
#else
handle_unaligned:
call handle_unaligned_c
br ret_from_exception
#endif
handle_illegal:
call handle_illegal_c
br ret_from_exception
handle_diverror:
call handle_diverror_c
br ret_from_exception
#ifdef CONFIG_KGDB
handle_kgdb_breakpoint:
call kgdb_breakpoint_c
br ret_from_exception
#endif
handle_trap_1:
call handle_trap_1_c
br ret_from_exception
handle_trap_2:
call handle_trap_2_c
br ret_from_exception
/* traps 3-29 share one handler */
handle_trap_3:
handle_trap_reserved:
call handle_trap_3_c
br ret_from_exception
/*
* Beware - when entering resume, prev (the current task) is
* in r4, next (the new task) is in r5, don't change these
* registers.
*/
/*
 * Context switch: save prev's status and kernel sp, switch to next's
 * kernel stack, update _current_thread, and restore next's status.
 * Beware - when entering resume, prev (the current task) is
 * in r4, next (the new task) is in r5, don't change these
 * registers.
 */
ENTRY(resume)
rdctl r7, status /* save thread status reg */
stw r7, TASK_THREAD + THREAD_KPSR(r4)
andi r7, r7, %lo(~STATUS_PIE) /* disable interrupts */
wrctl status, r7
SAVE_SWITCH_STACK
stw sp, TASK_THREAD + THREAD_KSP(r4)/* save kernel stack pointer */
ldw sp, TASK_THREAD + THREAD_KSP(r5)/* restore new thread stack */
movia r24, _current_thread /* save thread */
GET_THREAD_INFO r1
stw r1, 0(r24)
RESTORE_SWITCH_STACK
ldw r7, TASK_THREAD + THREAD_KPSR(r5)/* restore thread status reg */
wrctl status, r7
ret
/* First return of a forked child: finish the scheduler bookkeeping,
 * then take the normal exception-return path to user mode. */
ENTRY(ret_from_fork)
call schedule_tail
br ret_from_exception
/* First run of a kernel thread: r16 = function, r17 = argument
 * (set up by copy_thread); call it, then fall back to the
 * exception-return path. */
ENTRY(ret_from_kernel_thread)
call schedule_tail
mov r4,r17 /* arg */
callr r16 /* function */
br ret_from_exception
/*
* Kernel user helpers.
*
* Each segment is 64-byte aligned and will be mapped to the <User space>.
* New segments (if ever needed) must be added after the existing ones.
* This mechanism should be used only for things that are really small and
* justified, and not be abused freely.
*
*/
/* Filling pads with undefined instructions. */
/*
 * kuser_pad sym size - pad the helper at \sym out to \size bytes:
 * zero bytes up to the next word boundary, then 0xdeadbeef words
 * (undefined instructions) for the remainder.
 */
.macro kuser_pad sym size
.if ((. - \sym) & 3)
.rept (4 - (. - \sym) & 3)
.byte 0
.endr
.endif
.rept ((\size - (. - \sym)) / 4)
.word 0xdeadbeef
.endr
.endm
.align 6
.globl __kuser_helper_start
__kuser_helper_start:
/* helper-page version = number of 64-byte helper slots */
__kuser_helper_version: /* @ 0x1000 */
.word ((__kuser_helper_end - __kuser_helper_start) >> 6)
/*
 * User-space compare-and-exchange helper.  Atomicity is provided by
 * kuser_cmpxchg_check, which restarts the critical region
 * (cmpxchg_ldw..cmpxchg_stw) if an exception interrupts it.
 * Returns r2 = old - expected (0 on success).
 */
__kuser_cmpxchg: /* @ 0x1004 */
/*
* r4 pointer to exchange variable
* r5 old value
* r6 new value
*/
cmpxchg_ldw:
ldw r2, 0(r4) /* load current value */
sub r2, r2, r5 /* compare with old value */
bne r2, zero, cmpxchg_ret
/* We had a match, store the new value */
cmpxchg_stw:
stw r6, 0(r4)
cmpxchg_ret:
ret
kuser_pad __kuser_cmpxchg, 64
/* signal trampoline: issues the rt_sigreturn system call */
.globl __kuser_sigtramp
__kuser_sigtramp:
movi r2, __NR_rt_sigreturn
trap
kuser_pad __kuser_sigtramp, 64
.globl __kuser_helper_end
__kuser_helper_end:
|
aixcc-public/challenge-001-exemplar-source
| 14,754
|
arch/nios2/kernel/insnemu.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2003-2013 Altera Corporation
* All rights reserved.
*/
#include <linux/linkage.h>
#include <asm/entry.h>
.set noat
.set nobreak
/*
* Explicitly allow the use of r1 (the assembler temporary register)
* within this code. This register is normally reserved for the use of
* the compiler.
*/
/*
 * Unimplemented-instruction emulation (mul/muli/mulx../div/divu).
 * First restores the interrupted context and pops the pt_regs frame
 * (keeping the old sp in et), then re-saves r0-r31 into a private
 * 128-byte frame that the decode/emulate code below indexes by
 * 4*register-number.
 */
ENTRY(instruction_trap)
ldw r1, PT_R1(sp) // Restore registers
ldw r2, PT_R2(sp)
ldw r3, PT_R3(sp)
ldw r4, PT_R4(sp)
ldw r5, PT_R5(sp)
ldw r6, PT_R6(sp)
ldw r7, PT_R7(sp)
ldw r8, PT_R8(sp)
ldw r9, PT_R9(sp)
ldw r10, PT_R10(sp)
ldw r11, PT_R11(sp)
ldw r12, PT_R12(sp)
ldw r13, PT_R13(sp)
ldw r14, PT_R14(sp)
ldw r15, PT_R15(sp)
ldw ra, PT_RA(sp)
ldw fp, PT_FP(sp)
ldw gp, PT_GP(sp)
ldw et, PT_ESTATUS(sp)
wrctl estatus, et
ldw ea, PT_EA(sp)
ldw et, PT_SP(sp) /* backup sp in et */
addi sp, sp, PT_REGS_SIZE /* pop the pt_regs frame */
/* INSTRUCTION EMULATION
* ---------------------
*
* Nios II processors generate exceptions for unimplemented instructions.
* The routines below emulate these instructions. Depending on the
* processor core, the only instructions that might need to be emulated
* are div, divu, mul, muli, mulxss, mulxsu, and mulxuu.
*
* The emulations match the instructions, except for the following
* limitations:
*
* 1) The emulation routines do not emulate the use of the exception
* temporary register (et) as a source operand because the exception
* handler already has modified it.
*
* 2) The routines do not emulate the use of the stack pointer (sp) or
* the exception return address register (ea) as a destination because
* modifying these registers crashes the exception handler or the
* interrupted routine.
*
* Detailed Design
* ---------------
*
* The emulation routines expect the contents of integer registers r0-r31
* to be on the stack at addresses sp, 4(sp), 8(sp), ... 124(sp). The
* routines retrieve source operands from the stack and modify the
* destination register's value on the stack prior to the end of the
* exception handler. Then all registers except the destination register
* are restored to their previous values.
*
* The instruction that causes the exception is found at address -4(ea).
* The instruction's OP and OPX fields identify the operation to be
* performed.
*
* One instruction, muli, is an I-type instruction that is identified by
* an OP field of 0x24.
*
* muli AAAAA,BBBBB,IIIIIIIIIIIIIIII,-0x24-
* 27 22 6 0 <-- LSB of field
*
* The remaining emulated instructions are R-type and have an OP field
* of 0x3a. Their OPX fields identify them.
*
* R-type AAAAA,BBBBB,CCCCC,XXXXXX,NNNNN,-0x3a-
* 27 22 17 11 6 0 <-- LSB of field
*
*
* Opcode Encoding. muli is identified by its OP value. Then OPX & 0x02
* is used to differentiate between the division opcodes and the
* remaining multiplication opcodes.
*
* Instruction OP OPX OPX & 0x02
* ----------- ---- ---- ----------
* muli 0x24
* divu 0x3a 0x24 0
* div 0x3a 0x25 0
* mul 0x3a 0x27 != 0
* mulxuu 0x3a 0x07 != 0
* mulxsu 0x3a 0x17 != 0
* mulxss 0x3a 0x1f != 0
*/
/*
* Save everything on the stack to make it easy for the emulation
* routines to retrieve the source register operands.
*/
addi sp, sp, -128 /* private frame: r0-r31 at 4*regno(sp) */
stw zero, 0(sp) /* Save zero on stack to avoid special case for r0. */
stw r1, 4(sp)
stw r2, 8(sp)
stw r3, 12(sp)
stw r4, 16(sp)
stw r5, 20(sp)
stw r6, 24(sp)
stw r7, 28(sp)
stw r8, 32(sp)
stw r9, 36(sp)
stw r10, 40(sp)
stw r11, 44(sp)
stw r12, 48(sp)
stw r13, 52(sp)
stw r14, 56(sp)
stw r15, 60(sp)
stw r16, 64(sp)
stw r17, 68(sp)
stw r18, 72(sp)
stw r19, 76(sp)
stw r20, 80(sp)
stw r21, 84(sp)
stw r22, 88(sp)
stw r23, 92(sp)
/* Don't bother to save et. It's already been changed. */
rdctl r5, estatus
stw r5, 100(sp)
stw gp, 104(sp)
stw et, 108(sp) /* et contains previous sp value. */
stw fp, 112(sp)
stw ea, 116(sp)
stw ra, 120(sp)
/*
* Split the instruction into its fields. We need 4*A, 4*B, and 4*C as
* offsets to the stack pointer for access to the stored register values.
*/
ldw r2,-4(ea) /* r2 = AAAAA,BBBBB,IIIIIIIIIIIIIIII,PPPPPP */
roli r3, r2, 7 /* r3 = BBB,IIIIIIIIIIIIIIII,PPPPPP,AAAAA,BB */
roli r4, r3, 3 /* r4 = IIIIIIIIIIIIIIII,PPPPPP,AAAAA,BBBBB */
roli r5, r4, 2 /* r5 = IIIIIIIIIIIIII,PPPPPP,AAAAA,BBBBB,II */
srai r4, r4, 16 /* r4 = (sign-extended) IMM16 */
roli r6, r5, 5 /* r6 = XXXX,NNNNN,PPPPPP,AAAAA,BBBBB,CCCCC,XX */
andi r2, r2, 0x3f /* r2 = 00000000000000000000000000,PPPPPP */
andi r3, r3, 0x7c /* r3 = 0000000000000000000000000,AAAAA,00 */
andi r5, r5, 0x7c /* r5 = 0000000000000000000000000,BBBBB,00 */
andi r6, r6, 0x7c /* r6 = 0000000000000000000000000,CCCCC,00 */
/* Now
* r2 = OP
* r3 = 4*A
* r4 = IMM16 (sign extended)
* r5 = 4*B
* r6 = 4*C
*/
/*
* Get the operands.
*
* It is necessary to check for muli because it uses an I-type
* instruction format, while the other instructions are have an R-type
* format.
*
* Prepare for either multiplication or division loop.
* They both loop 32 times.
*/
movi r14, 32
add r3, r3, sp /* r3 = address of A-operand. */
ldw r3, 0(r3) /* r3 = A-operand. */
movi r7, 0x24 /* muli opcode (I-type instruction format) */
beq r2, r7, mul_immed /* muli doesn't use the B register as a source */
add r5, r5, sp /* r5 = address of B-operand. */
ldw r5, 0(r5) /* r5 = B-operand. */
/* r4 = SSSSSSSSSSSSSSSS,-----IMM16------ */
/* IMM16 not needed, align OPX portion */
/* r4 = SSSSSSSSSSSSSSSS,CCCCC,-OPX--,00000 */
srli r4, r4, 5 /* r4 = 00000,SSSSSSSSSSSSSSSS,CCCCC,-OPX-- */
andi r4, r4, 0x3f /* r4 = 00000000000000000000000000,-OPX-- */
/* Now
* r2 = OP
* r3 = src1
* r5 = src2
* r4 = OPX (no longer can be muli)
* r6 = 4*C
*/
/*
* Multiply or Divide?
*/
andi r7, r4, 0x02 /* For R-type multiply instructions,
OPX & 0x02 != 0 */
bne r7, zero, multiply
/* DIVISION
*
* Divide an unsigned dividend by an unsigned divisor using
* a shift-and-subtract algorithm. The example below shows
* 43 div 7 = 6 for 8-bit integers. This classic algorithm uses a
* single register to store both the dividend and the quotient,
* allowing both values to be shifted with a single instruction.
*
* remainder dividend:quotient
* --------- -----------------
* initialize 00000000 00101011:
* shift 00000000 0101011:_
* remainder >= divisor? no 00000000 0101011:0
* shift 00000000 101011:0_
* remainder >= divisor? no 00000000 101011:00
* shift 00000001 01011:00_
* remainder >= divisor? no 00000001 01011:000
* shift 00000010 1011:000_
* remainder >= divisor? no 00000010 1011:0000
* shift 00000101 011:0000_
* remainder >= divisor? no 00000101 011:00000
* shift 00001010 11:00000_
* remainder >= divisor? yes 00001010 11:000001
* remainder -= divisor - 00000111
* ----------
* 00000011 11:000001
* shift 00000111 1:000001_
* remainder >= divisor? yes 00000111 1:0000011
* remainder -= divisor - 00000111
* ----------
* 00000000 1:0000011
* shift 00000001 :0000011_
* remainder >= divisor? no 00000001 :00000110
*
* The quotient is 00000110.
*/
divide:
/*
* Prepare for division by assuming the result
* is unsigned, and storing its "sign" as 0.
*/
movi r17, 0
/* Which division opcode? */
xori r7, r4, 0x25 /* OPX of div */
bne r7, zero, unsigned_division
/*
* OPX is div. Determine and store the sign of the quotient.
* Then take the absolute value of both operands.
*/
xor r17, r3, r5 /* MSB contains sign of quotient */
bge r3,zero,dividend_is_nonnegative
sub r3, zero, r3 /* -r3 */
dividend_is_nonnegative:
bge r5, zero, divisor_is_nonnegative
sub r5, zero, r5 /* -r5 */
divisor_is_nonnegative:
unsigned_division:
/* Initialize the unsigned-division loop. */
movi r13, 0 /* remainder = 0 */
/* Now
* r3 = dividend : quotient
* r4 = 0x25 for div, 0x24 for divu
* r5 = divisor
* r13 = remainder
* r14 = loop counter (already initialized to 32)
* r17 = MSB contains sign of quotient
*/
/*
* for (count = 32; count > 0; --count)
* {
*/
divide_loop:
/*
* Division:
*
* (remainder:dividend:quotient) <<= 1;
*/
slli r13, r13, 1
cmplt r7, r3, zero /* r7 = MSB of r3 */
or r13, r13, r7
slli r3, r3, 1
/*
* if (remainder >= divisor)
* {
* set LSB of quotient
* remainder -= divisor;
* }
*/
bltu r13, r5, div_skip
ori r3, r3, 1
sub r13, r13, r5
div_skip:
/*
* }
*/
subi r14, r14, 1
bne r14, zero, divide_loop
/* Now
* r3 = quotient
* r4 = 0x25 for div, 0x24 for divu
* r6 = 4*C
* r17 = MSB contains sign of quotient
*/
/*
* Conditionally negate signed quotient. If quotient is unsigned,
* the sign already is initialized to 0.
*/
bge r17, zero, quotient_is_nonnegative
sub r3, zero, r3 /* -r3 */
quotient_is_nonnegative:
/*
* Final quotient is in r3.
*/
add r6, r6, sp
stw r3, 0(r6) /* write quotient to stack */
br restore_registers
/* MULTIPLICATION
*
* A "product" is the number that one gets by summing a "multiplicand"
* several times. The "multiplier" specifies the number of copies of the
* multiplicand that are summed.
*
* Actual multiplication algorithms don't use repeated addition, however.
* Shift-and-add algorithms get the same answer as repeated addition, and
* they are faster. To compute the lower half of a product (pppp below)
* one shifts the product left before adding in each of the partial
* products (a * mmmm) through (d * mmmm).
*
* To compute the upper half of a product (PPPP below), one adds in the
* partial products (d * mmmm) through (a * mmmm), each time following
* the add by a right shift of the product.
*
* mmmm
* * abcd
* ------
* #### = d * mmmm
* #### = c * mmmm
* #### = b * mmmm
* #### = a * mmmm
* --------
* PPPPpppp
*
* The example above shows 4 partial products. Computing actual Nios II
* products requires 32 partials.
*
* It is possible to compute the result of mulxsu from the result of
* mulxuu because the only difference between the results of these two
* opcodes is the value of the partial product associated with the sign
* bit of rA.
*
* mulxsu = mulxuu - (rA < 0) ? rB : 0;
*
* It is possible to compute the result of mulxss from the result of
* mulxsu because the only difference between the results of these two
* opcodes is the value of the partial product associated with the sign
* bit of rB.
*
* mulxss = mulxsu - (rB < 0) ? rA : 0;
*
*/
mul_immed:
/* Opcode is muli. Change it into mul for remainder of algorithm. */
mov r6, r5 /* Field B is dest register, not field C. */
mov r5, r4 /* Field IMM16 is src2, not field B. */
movi r4, 0x27 /* OPX of mul is 0x27 */
multiply:
/* Initialize the multiplication loop. */
movi r9, 0 /* mul_product = 0 */
movi r10, 0 /* mulxuu_product = 0 */
mov r11, r5 /* save original multiplier for mulxsu and mulxss */
mov r12, r5 /* mulxuu_multiplier (will be shifted) */
movi r16, 1 /* used to create "rori B,A,1" from "ror B,A,r16" */
/* Now
* r3 = multiplicand
* r5 = mul_multiplier
* r6 = 4 * dest_register (used later as offset to sp)
* r7 = temp
* r9 = mul_product
* r10 = mulxuu_product
* r11 = original multiplier
* r12 = mulxuu_multiplier
* r14 = loop counter (already initialized)
* r16 = 1
*/
/*
* for (count = 32; count > 0; --count)
* {
*/
multiply_loop:
/*
* mul_product <<= 1;
* lsb = multiplier & 1;
*/
slli r9, r9, 1
andi r7, r12, 1
/*
* if (lsb == 1)
* {
* mulxuu_product += multiplicand;
* }
*/
beq r7, zero, mulx_skip
add r10, r10, r3
cmpltu r7, r10, r3 /* Save the carry from the MSB of mulxuu_product. */
ror r7, r7, r16 /* r7 = 0x80000000 on carry, or else 0x00000000 */
mulx_skip:
/*
* if (MSB of mul_multiplier == 1)
* {
* mul_product += multiplicand;
* }
*/
bge r5, zero, mul_skip
add r9, r9, r3
mul_skip:
/*
* mulxuu_product >>= 1; logical shift
* mul_multiplier <<= 1; done with MSB
* mulx_multiplier >>= 1; done with LSB
*/
srli r10, r10, 1
or r10, r10, r7 /* OR in the saved carry bit. */
slli r5, r5, 1
srli r12, r12, 1
/*
* }
*/
subi r14, r14, 1
bne r14, zero, multiply_loop
/*
* Multiply emulation loop done.
*/
/* Now
* r3 = multiplicand
* r4 = OPX
* r6 = 4 * dest_register (used later as offset to sp)
* r7 = temp
* r9 = mul_product
* r10 = mulxuu_product
* r11 = original multiplier
*/
/* Calculate address for result from 4 * dest_register */
add r6, r6, sp
/*
* Select/compute the result based on OPX.
*/
/* OPX == mul? Then store. */
xori r7, r4, 0x27
beq r7, zero, store_product
/* It's one of the mulx.. opcodes. Move over the result. */
mov r9, r10
/* OPX == mulxuu? Then store. */
xori r7, r4, 0x07
beq r7, zero, store_product
/* Compute mulxsu
*
* mulxsu = mulxuu - (rA < 0) ? rB : 0;
*/
bge r3, zero, mulxsu_skip
sub r9, r9, r11
mulxsu_skip:
/* OPX == mulxsu? Then store. */
xori r7, r4, 0x17
beq r7, zero, store_product
/* Compute mulxss
*
* mulxss = mulxsu - (rB < 0) ? rA : 0;
*/
bge r11,zero,mulxss_skip
sub r9, r9, r3
mulxss_skip:
/* At this point, assume that OPX is mulxss, so store*/
store_product:
stw r9, 0(r6)
/* Restore every register except the destination (already patched on
 * the stack) and et, then return past the emulated instruction. */
restore_registers:
/* No need to restore r0. */
ldw r5, 100(sp)
wrctl estatus, r5
ldw r1, 4(sp)
ldw r2, 8(sp)
ldw r3, 12(sp)
ldw r4, 16(sp)
ldw r5, 20(sp)
ldw r6, 24(sp)
ldw r7, 28(sp)
ldw r8, 32(sp)
ldw r9, 36(sp)
ldw r10, 40(sp)
ldw r11, 44(sp)
ldw r12, 48(sp)
ldw r13, 52(sp)
ldw r14, 56(sp)
ldw r15, 60(sp)
ldw r16, 64(sp)
ldw r17, 68(sp)
ldw r18, 72(sp)
ldw r19, 76(sp)
ldw r20, 80(sp)
ldw r21, 84(sp)
ldw r22, 88(sp)
ldw r23, 92(sp)
/* Does not need to restore et */
ldw gp, 104(sp)
ldw fp, 112(sp)
ldw ea, 116(sp)
ldw ra, 120(sp)
ldw sp, 108(sp) /* last restore sp */
eret
.set at
.set break
|
aixcc-public/challenge-001-exemplar-source
| 1,066
|
arch/nios2/kernel/vmlinux.lds.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw>
*/
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
OUTPUT_FORMAT("elf32-littlenios2", "elf32-littlenios2", "elf32-littlenios2")
OUTPUT_ARCH(nios)
ENTRY(_start) /* Defined in head.S */
jiffies = jiffies_64;
SECTIONS
{
	/* kernel link address: physical memory base within the kernel region */
	. = CONFIG_NIOS2_MEM_BASE | CONFIG_NIOS2_KERNEL_REGION_BASE;
	_text = .;
	_stext = .;
	HEAD_TEXT_SECTION
	.text : {
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		KPROBES_TEXT
	} =0 /* fill gaps in .text with zero bytes */
	_etext = .;
	.got : {
		*(.got.plt)
		*(.igot.plt)
		*(.got)
		*(.igot)
	}
	EXCEPTION_TABLE(L1_CACHE_BYTES)
	. = ALIGN(PAGE_SIZE);
	/* init text/data are page-aligned so they can be freed after boot */
	__init_begin = .;
	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(PAGE_SIZE)
	PERCPU_SECTION(L1_CACHE_BYTES)
	__init_end = .;
	_sdata = .;
	RO_DATA(PAGE_SIZE)
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	_edata = .;
	BSS_SECTION(0, 0, 0)
	_end = .;
	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS
	DISCARDS
}
|
aixcc-public/challenge-001-exemplar-source
| 2,803
|
arch/nios2/boot/compressed/head.S
|
/*
* Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw>
*
* Based on arch/nios2/kernel/head.S
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
/*
* Startup code for the compressed (self-extracting) Nios II kernel.
*
* This code can be loaded anywhere, eg FLASH ROM as reset vector,
* as long as output does not overlap it. It invalidates the caches,
* relocates itself to its linked address if needed, clears .bss,
* sets up a stack, decompresses the real kernel and jumps to it.
*/
#include <linux/linkage.h>
#include <asm/cache.h>
.text
.set noat /* r1 (the assembler temporary, "at") is used explicitly below */
ENTRY(_start)
wrctl status, r0 /* disable interrupt */
/* invalidate all instruction cache */
movia r1, NIOS2_ICACHE_SIZE
movui r2, NIOS2_ICACHE_LINE_SIZE
1: initi r1 /* walk line addresses from cache size down to 0 */
sub r1, r1, r2
bgt r1, r0, 1b
/* invalidate all data cache */
movia r1, NIOS2_DCACHE_SIZE
movui r2, NIOS2_DCACHE_LINE_SIZE
1: initd 0(r1)
sub r1, r1, r2
bgt r1, r0, 1b
nextpc r1 /* Find out where we are */
chkadr:
movia r2, chkadr /* r2 = linked address of chkadr */
beq r1, r2, finish_move /* already running at the linked address, done */
/* move code, r1: src, r2: dest, r3: last dest */
addi r1, r1, (_start - chkadr) /* Source */
movia r2, _start /* Destination */
movia r3, __bss_start /* End of copy */
1: ldw r8, 0(r1) /* load a word from [r1] */
stw r8, 0(r2) /* store a word to dest [r2] */
addi r1, r1, 4 /* inc the src addr */
addi r2, r2, 4 /* inc the dest addr */
blt r2, r3, 1b
/* flush the data cache after moving */
movia r1, NIOS2_DCACHE_SIZE
movui r2, NIOS2_DCACHE_LINE_SIZE
1: flushd 0(r1)
sub r1, r1, r2
bgt r1, r0, 1b
movia r1, finish_move
jmp r1 /* jmp to linked address */
finish_move:
/* zero out the .bss segment (uninitialized common data) */
movia r2, __bss_start /* presume nothing is between */
movia r1, _end /* the .bss and _end. */
1: stb r0, 0(r2)
addi r2, r2, 1
bne r1, r2, 1b
/*
* set up the stack pointer, some where higher than _end.
* The stack space must be greater than 32K for decompress.
*/
movia sp, 0x10000
add sp, sp, r1 /* sp = _end + 64KiB (r1 still holds _end) */
/* save args passed from u-boot, maybe */
addi sp, sp, -16
stw r4, 0(sp)
stw r5, 4(sp)
stw r6, 8(sp)
stw r7, 12(sp)
/* decompress the kernel */
call decompress_kernel
/* pass saved args to kernel */
ldw r4, 0(sp)
ldw r5, 4(sp)
ldw r6, 8(sp)
ldw r7, 12(sp)
/* flush all data cache after decompressing */
movia r1, NIOS2_DCACHE_SIZE
movui r2, NIOS2_DCACHE_LINE_SIZE
1: flushd 0(r1)
sub r1, r1, r2
bgt r1, r0, 1b
/* flush all instruction cache */
movia r1, NIOS2_ICACHE_SIZE
movui r2, NIOS2_ICACHE_LINE_SIZE
1: flushi r1
sub r1, r1, r2
bgt r1, r0, 1b
flushp /* flush the pipeline before jumping to fresh code */
/* jump to start real kernel */
movia r1, (CONFIG_NIOS2_MEM_BASE | CONFIG_NIOS2_KERNEL_REGION_BASE)
jmp r1
/*
* Fake image header so generic boot loaders will accept the image.
* Only the magic and a few fields are populated.
*/
.balign 512
fake_headers_as_bzImage:
.short 0
.ascii "HdrS" /* boot-protocol magic expected by some loaders */
.short 0x0202 /* header version -- TODO confirm against loader expectations */
.short 0
.short 0
.byte 0x00, 0x10
.short 0
.byte 0
.byte 1
.byte 0x00, 0x80
.long 0
.long 0
|
aixcc-public/challenge-001-exemplar-source
| 7,624
|
arch/m68k/coldfire/head.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*****************************************************************************/
/*
* head.S -- common startup code for ColdFire CPUs.
*
* (C) Copyright 1999-2011, Greg Ungerer <gerg@snapgear.com>.
*/
/*****************************************************************************/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfmmu.h>
#include <asm/thread_info.h>
/*****************************************************************************/
/*
* If we don't have a fixed memory size, then lets build in code
* to auto detect the DRAM size. Obviously this is the preferred
* method, and should work for most boards. It won't work for those
* that do not have their RAM starting at address 0, and it only
* works on SDRAM (not boards fitted with SRAM).
*/
#if CONFIG_RAMSIZE != 0
/* Fixed memory size configured at build time: just load it. */
.macro GET_MEM_SIZE
movel #CONFIG_RAMSIZE,%d0 /* hard coded memory size */
.endm
#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
defined(CONFIG_M5249) || defined(CONFIG_M525x) || \
defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M5307) || defined(CONFIG_M5407)
/*
* Not all these devices have exactly the same DRAM controller,
* but the DCMR register is virtually identical - give or take
* a couple of bits. The only exception is the 5272 devices, their
* DRAM controller is quite different.
*
* GET_MEM_SIZE: probe both DRAM bank mask registers and leave the
* total memory size in %d0 (clobbers %d1).
*/
.macro GET_MEM_SIZE
movel MCFSIM_DMR0,%d0 /* get mask for 1st bank */
btst #0,%d0 /* check if region enabled */
beq 1f
andl #0xfffc0000,%d0
beq 1f
addl #0x00040000,%d0 /* convert mask to size */
1:
movel MCFSIM_DMR1,%d1 /* get mask for 2nd bank */
btst #0,%d1 /* check if region enabled */
beq 2f
andl #0xfffc0000,%d1
beq 2f
addl #0x00040000,%d1 /* convert mask to size */
addl %d1,%d0 /* total mem size in d0 */
2:
.endm
#elif defined(CONFIG_M5272)
/* GET_MEM_SIZE: derive SDRAM size from the chip-select 7 address mask. */
.macro GET_MEM_SIZE
movel MCFSIM_CSOR7,%d0 /* get SDRAM address mask */
andil #0xfffff000,%d0 /* mask out chip select options */
negl %d0 /* negate bits */
.endm
#elif defined(CONFIG_M520x)
/* GET_MEM_SIZE: sum sizes of SDRAM chip selects 0 and 1 into %d0
(each size is 2^(field+1); clobbers %d1 and %d2). */
.macro GET_MEM_SIZE
clrl %d0
movel MCFSIM_SDCS0, %d2 /* Get SDRAM chip select 0 config */
andl #0x1f, %d2 /* Get only the chip select size */
beq 3f /* Check if it is enabled */
addql #1, %d2 /* Form exponent */
moveql #1, %d0
lsll %d2, %d0 /* 2 ^ exponent */
3:
movel MCFSIM_SDCS1, %d2 /* Get SDRAM chip select 1 config */
andl #0x1f, %d2 /* Get only the chip select size */
beq 4f /* Check if it is enabled */
addql #1, %d2 /* Form exponent */
moveql #1, %d1
lsll %d2, %d1 /* 2 ^ exponent */
addl %d1, %d0 /* Total size of SDRAM in d0 */
4:
.endm
#else
#error "ERROR: I don't know how to probe your boards memory size?"
#endif
/*****************************************************************************/
/*
* Boards and platforms can do specific early hardware setup if
* they need to. Most don't need this, define away if not required.
*/
#ifndef PLATFORM_SETUP
#define PLATFORM_SETUP
#endif
/*****************************************************************************/
/* Symbols exported for use by the rest of the kernel / board code. */
.global _start
.global _rambase
.global _ramvec
.global _ramstart
.global _ramend
#if defined(CONFIG_UBOOT)
.global _init_sp
#endif
/*****************************************************************************/
.data
/*
* During startup we store away the RAM setup. These are not in the
* bss, since their values are determined and written before the bss
* has been cleared.
*/
_rambase: /* base address of system RAM (CONFIG_RAMBASE) */
.long 0
_ramvec: /* address of the exception vector table (CONFIG_VECTORBASE) */
.long 0
_ramstart: /* first free RAM address after the kernel image */
.long 0
_ramend: /* end address of RAM (_rambase + detected size) */
.long 0
#if defined(CONFIG_UBOOT)
_init_sp: /* initial stack pointer handed to us by u-boot */
.long 0
#endif
/*****************************************************************************/
__HEAD
#ifdef CONFIG_MMU
/*
* With the MMU enabled, the first 4KiB page of the image doubles as the
* kernel page directory: kernel_pg_dir is aliased to _start0, and the
* ".equ ." advances the location counter to reserve that whole page.
*/
_start0:
jmp _start
.global kernel_pg_dir
.equ kernel_pg_dir,_start0
.equ .,_start0+0x1000
#endif
/*
* This is the code's first entry point. This is where it all
* begins...
*/
_start:
nop /* filler */
movew #0x2700, %sr /* no interrupts */
movel #CACHE_INIT,%d0 /* disable cache */
movec %d0,%CACR
nop
#if defined(CONFIG_UBOOT)
movel %sp,_init_sp /* save initial stack pointer */
#endif
#ifdef CONFIG_MBAR
movel #CONFIG_MBAR+1,%d0 /* configured MBAR address (+1 sets the valid bit) */
movec %d0,%MBAR /* set it */
#endif
/*
* Do any platform or board specific setup now. Most boards
* don't need anything. The exceptions define this in
* their board specific includes.
*/
PLATFORM_SETUP
/*
* Create basic memory configuration. Set VBR accordingly,
* and size memory.
*/
movel #CONFIG_VECTORBASE,%a7
movec %a7,%VBR /* set vectors addr */
movel %a7,_ramvec
movel #CONFIG_RAMBASE,%a7 /* mark the base of RAM */
movel %a7,_rambase
GET_MEM_SIZE /* macro code determines size */
addl %a7,%d0
movel %d0,_ramend /* set end ram addr */
/*
* Now that we know what the memory is, lets enable cache
* and get things moving. This is Coldfire CPU specific. Not
* all version cores have identical cache register setup. But
* it is very similar. Define the exact settings in the headers
* then the code here is the same for all.
*/
movel #ACR0_MODE,%d0 /* set RAM region for caching */
movec %d0,%ACR0
movel #ACR1_MODE,%d0 /* anything else to cache? */
movec %d0,%ACR1
#ifdef ACR2_MODE
movel #ACR2_MODE,%d0
movec %d0,%ACR2
movel #ACR3_MODE,%d0
movec %d0,%ACR3
#endif
movel #CACHE_MODE,%d0 /* enable cache */
movec %d0,%CACR
nop
#ifdef CONFIG_MMU
/*
* Identity mapping for the kernel region.
*/
movel #(MMUBASE+1),%d0 /* enable MMUBAR registers */
movec %d0,%MMUBAR
movel #MMUOR_CA,%d0 /* clear TLB entries */
movel %d0,MMUOR
movel #0,%d0 /* set ASID to 0 */
movec %d0,%asid
movel #MMUCR_EN,%d0 /* Enable the identity map */
movel %d0,MMUCR
nop /* sync i-pipeline */
movel #_vstart,%a0 /* jump to "virtual" space */
jmp %a0@
_vstart:
#endif /* CONFIG_MMU */
#ifdef CONFIG_ROMFS_FS
/*
* Move ROM filesystem above bss :-)
* The copy runs backwards (predecrement) so that overlapping
* source and destination regions are handled correctly.
*/
lea __bss_start,%a0 /* get start of bss */
lea __bss_stop,%a1 /* set up destination */
movel %a0,%a2 /* copy of bss start */
movel 8(%a0),%d0 /* get size of ROMFS */
addql #8,%d0 /* allow for rounding */
andl #0xfffffffc, %d0 /* whole words */
addl %d0,%a0 /* copy from end */
addl %d0,%a1 /* copy from end */
movel %a1,_ramstart /* set start of ram */
_copy_romfs:
movel -(%a0),%d0 /* copy dword */
movel %d0,-(%a1)
cmpl %a0,%a2 /* check if at end */
bne _copy_romfs
#else /* CONFIG_ROMFS_FS */
lea __bss_stop,%a1
movel %a1,_ramstart
#endif /* CONFIG_ROMFS_FS */
/*
* Zero out the bss region.
*/
lea __bss_start,%a0 /* get start of bss */
lea __bss_stop,%a1 /* get end of bss */
clrl %d0 /* set value */
_clear_bss:
movel %d0,(%a0)+ /* clear each word */
cmpl %a0,%a1 /* check if at end */
bne _clear_bss
/*
* Load the current task pointer and stack.
*/
lea init_thread_union,%a0
lea THREAD_SIZE(%a0),%sp /* stack grows down from top of the union */
#ifdef CONFIG_MMU
.global m68k_cputype
.global m68k_mmutype
.global m68k_fputype
.global m68k_machtype
movel #CPU_COLDFIRE,%d0
movel %d0,m68k_cputype /* Mark us as a ColdFire */
movel #MMU_COLDFIRE,%d0
movel %d0,m68k_mmutype
movel #FPUTYPE,%d0
movel %d0,m68k_fputype /* Mark FPU type */
movel #MACHINE,%d0
movel %d0,m68k_machtype /* Mark machine type */
lea init_task,%a2 /* Set "current" init task */
#endif
/*
* Assembler start up done, start code proper.
*/
jsr start_kernel /* start Linux kernel */
_exit:
jmp _exit /* should never get here */
/*****************************************************************************/
|
aixcc-public/challenge-001-exemplar-source
| 5,327
|
arch/m68k/coldfire/entry.S
|
/*
* entry.S -- interrupt and exception processing for ColdFire
*
* Copyright (C) 1999-2007, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
* Kenneth Albanowski <kjahds@kjahds.com>,
* Copyright (C) 2000 Lineo Inc. (www.lineo.com)
* Copyright (C) 2004-2006 Macq Electronique SA. (www.macqel.com)
*
* Based on:
*
* linux/arch/m68k/kernel/entry.S
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file README.legal in the main directory of this archive
* for more details.
*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
* ColdFire support by Greg Ungerer (gerg@snapgear.com)
* 5307 fixes by David W. Miller
* linux 2.4 support David McCullough <davidm@snapgear.com>
* Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
*/
#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
#ifdef CONFIG_COLDFIRE_SW_A7
/*
* Define software copies of the supervisor and user stack pointers
* (for parts that maintain A7 switching in software).
*/
.bss
sw_ksp: /* saved kernel (supervisor) stack pointer */
.long 0
sw_usp: /* saved user stack pointer */
.long 0
#endif /* CONFIG_COLDFIRE_SW_A7 */
.text
.globl system_call
.globl resume
.globl ret_from_exception
.globl sys_call_table
.globl inthandler
/* Invalid syscall number or NULL table entry: dispatch to sys_ni_syscall. */
enosys:
mov.l #sys_ni_syscall,%d3
bra 1f
/*
* System call entry point. On entry %d0 holds the syscall number;
* the handler address is looked up in sys_call_table, called, and
* its return value stored back into the saved register frame.
*/
ENTRY(system_call)
SAVE_ALL_SYS
move #0x2000,%sr /* enable intrs again */
GET_CURRENT(%d2)
cmpl #NR_syscalls,%d0
jcc enosys /* unsigned >= NR_syscalls -> invalid */
lea sys_call_table,%a0
lsll #2,%d0 /* movel %a0@(%d0:l:4),%d3 */
movel %a0@(%d0),%d3
jeq enosys /* NULL table entry -> invalid */
1:
movel %sp,%d2 /* get thread_info pointer */
andl #-THREAD_SIZE,%d2 /* at start of kernel stack */
movel %d2,%a0
movel %a0@,%a1 /* save top of frame */
movel %sp,%a1@(TASK_THREAD+THREAD_ESP0)
btst #(TIF_SYSCALL_TRACE%8),%a0@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
bnes 1f /* being traced: take the slow path below */
movel %d3,%a0
jbsr %a0@ /* call the syscall handler */
movel %d0,%sp@(PT_OFF_D0) /* save the return value */
jra ret_from_exception
/* Traced syscall: notify the tracer on entry and exit. */
1:
movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_OFF_D0 */
movel %d2,PT_OFF_D0(%sp) /* on syscall entry */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
addql #1,%d0 /* tracer returned -1 -> skip the call */
jeq ret_from_exception
movel %d3,%a0
jbsr %a0@ /* call the syscall handler */
movel %d0,%sp@(PT_OFF_D0) /* save the return value */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace_leave
RESTORE_SWITCH_STACK
addql #4,%sp
/* Common exit path for syscalls, interrupts and exceptions. */
ret_from_exception:
move #0x2700,%sr /* disable intrs */
btst #5,%sp@(PT_OFF_SR) /* supervisor bit set in the saved SR? */
jeq Luser_return /* clear -> returning to user space */
#ifdef CONFIG_PREEMPTION
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
movel %d1,%a0
movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */
andl #(1<<TIF_NEED_RESCHED),%d1
jeq Lkernel_return /* no reschedule pending */
movel %a0@(TINFO_PREEMPT),%d1
cmpl #0,%d1
jne Lkernel_return /* preemption currently disabled */
pea Lkernel_return /* resume here after preemption */
jmp preempt_schedule_irq /* preempt the kernel */
#endif
/* Return to kernel context: restore scratch regs and rte. */
Lkernel_return:
moveml %sp@,%d1-%d5/%a0-%a2
lea %sp@(32),%sp /* space for 8 regs */
movel %sp@+,%d0
addql #4,%sp /* orig d0 */
addl %sp@+,%sp /* stk adj */
rte
/* Return to user space: check for pending work first. */
Luser_return:
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
movel %d1,%a0
moveb %a0@(TINFO_FLAGS+3),%d1 /* thread_info->flags (low 8 bits) */
jne Lwork_to_do /* still work to do */
Lreturn:
RESTORE_USER
Lwork_to_do:
movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */
move #0x2000,%sr /* enable intrs again */
btst #TIF_NEED_RESCHED,%d1
jne reschedule
Lsignal_return:
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jsr do_notify_resume /* handle signals / resume notifications */
addql #4,%sp
RESTORE_SWITCH_STACK
addql #4,%sp
jmp Luser_return /* re-check for more pending work */
/*
* This is the generic interrupt handler (for all hardware interrupt
* sources). Calls up to high level code to do all the work.
* Calling convention: do_IRQ(vector, regs) with args pushed on the stack.
*/
ENTRY(inthandler)
SAVE_ALL_INT
GET_CURRENT(%d2)
movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
andl #0x03fc,%d0 /* mask out vector only */
movel %sp,%sp@- /* push regs arg */
lsrl #2,%d0 /* calculate real vector # */
movel %d0,%sp@- /* push vector number */
jbsr do_IRQ /* call high level irq handler */
lea %sp@(8),%sp /* pop args off stack */
bra ret_from_exception
/*
* resume - task context switch: save prev's state, load next's.
*
* Beware - when entering resume, prev (the current task) is
* in a0, next (the new task) is in a1, so don't change these
* registers until their contents are no longer needed.
*/
ENTRY(resume)
movew %sr,%d1 /* save current status */
movew %d1,%a0@(TASK_THREAD+THREAD_SR)
movel %a0,%d1 /* get prev thread in d1 */
SAVE_SWITCH_STACK
movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
RDUSP /* movel %usp,%a3 */
movel %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
#ifdef CONFIG_MMU
movel %a1,%a2 /* set new current */
#endif
movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
WRUSP /* movel %a3,%usp */
movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */
movew %a1@(TASK_THREAD+THREAD_SR),%d7 /* restore new status */
movew %d7,%sr
RESTORE_SWITCH_STACK
rts
|
aixcc-public/challenge-001-exemplar-source
| 90,395
|
arch/m68k/kernel/head.S
|
/* -*- mode: asm -*-
**
** head.S -- This file contains the initial boot code for the
** Linux/68k kernel.
**
** Copyright 1993 by Hamish Macdonald
**
** 68040 fixes by Michael Rausch
** 68060 fixes by Roman Hodek
** MMU cleanup by Randy Thelen
** Final MMU cleanup by Roman Zippel
**
** Atari support by Andreas Schwab, using ideas of Robert de Vries
** and Bjoern Brauel
** VME Support by Richard Hirst
**
** 94/11/14 Andreas Schwab: put kernel at PAGESIZE
** 94/11/18 Andreas Schwab: remove identity mapping of STRAM for Atari
** ++ Bjoern & Roman: ATARI-68040 support for the Medusa
** 95/11/18 Richard Hirst: Added MVME166 support
** 96/04/26 Guenther Kelleter: fixed identity mapping for Falcon with
** Magnum- and FX-alternate ram
** 98/04/25 Phil Blundell: added HP300 support
** 1998/08/30 David Kilzer: Added support for font_desc structures
** for linux-2.1.115
** 1999/02/11 Richard Zidlicky: added Q40 support (initial version 99/01/01)
** 2004/05/13 Kars de Jong: Finalised HP300 support
**
** This file is subject to the terms and conditions of the GNU General Public
** License. See the file README.legal in the main directory of this archive
** for more details.
**
*/
/*
* Linux startup code.
*
* At this point, the boot loader has:
* Disabled interrupts
* Disabled caches
* Put us in supervisor state.
*
* The kernel setup code takes the following steps:
* . Raise interrupt level
* . Set up initial kernel memory mapping.
* . This sets up a mapping of the 4M of memory the kernel is located in.
* . It also does a mapping of any initial machine specific areas.
* . Enable the MMU
* . Enable cache memories
* . Jump to kernel startup
*
* Much of the file restructuring was to accomplish:
* 1) Remove register dependency through-out the file.
* 2) Increase use of subroutines to perform functions
* 3) Increase readability of the code
*
* Of course, readability is a subjective issue, so it will never be
* argued that that goal was accomplished. It was merely a goal.
* A key way to help make code more readable is to give good
* documentation. So, the first thing you will find is exhaustive
* write-ups on the structure of the file, and the features of the
* functional subroutines.
*
* General Structure:
* ------------------
* Without a doubt the single largest chunk of head.S is spent
* mapping the kernel and I/O physical space into the logical range
* for the kernel.
* There are new subroutines and data structures to make MMU
* support cleaner and easier to understand.
* First, you will find a routine call "mmu_map" which maps
* a logical to a physical region for some length given a cache
* type on behalf of the caller. This routine makes writing the
* actual per-machine specific code very simple.
* A central part of the code, but not a subroutine in itself,
* is the mmu_init code which is broken down into mapping the kernel
* (the same for all machines) and mapping machine-specific I/O
* regions.
* Also, there will be a description of engaging the MMU and
* caches.
* You will notice that there is a chunk of code which
* can emit the entire MMU mapping of the machine. This is present
* only in debug modes and can be very helpful.
* Further, there is a new console driver in head.S that is
* also only engaged in debug mode. Currently, it's only supported
* on the Macintosh class of machines. However, it is hoped that
* others will plug-in support for specific machines.
*
* ######################################################################
*
* mmu_map
* -------
* mmu_map was written for two key reasons. First, it was clear
* that it was very difficult to read the previous code for mapping
* regions of memory. Second, the Macintosh required such extensive
* memory allocations that it didn't make sense to propagate the
* existing code any further.
* mmu_map requires some parameters:
*
* mmu_map (logical, physical, length, cache_type)
*
* While this essentially describes the function in the abstract, you'll
* find a more in-depth description of other parameters at the implementation site.
*
* mmu_get_root_table_entry
* ------------------------
* mmu_get_ptr_table_entry
* -----------------------
* mmu_get_page_table_entry
* ------------------------
*
* These routines are used by other mmu routines to get a pointer into
* a table, if necessary a new table is allocated. These routines are working
* basically like pmd_alloc() and pte_alloc() in <asm/pgtable.h>. The root
* table needs of course only to be allocated once in mmu_get_root_table_entry,
* so that here also some mmu specific initialization is done. The second page
* at the start of the kernel (the first page is unmapped later) is used for
* the kernel_pg_dir. It must be at a position known at link time (as it's used
* to initialize the init task struct) and since it needs special cache
* settings, it's the easiest to use this page, the rest of the page is used
* for further pointer tables.
* mmu_get_page_table_entry allocates always a whole page for page tables, this
* means 1024 pages and so 4MB of memory can be mapped. It doesn't make sense
* to manage page tables in smaller pieces as nearly all mappings have that
* size.
*
* ######################################################################
*
*
* ######################################################################
*
* mmu_engage
* ----------
* Thanks to a small helping routine enabling the mmu got quite simple
* and there is only one way left. mmu_engage makes a complete a new mapping
* that only includes the absolute necessary to be able to jump to the final
* position and to restore the original mapping.
* As this code doesn't need a transparent translation register anymore this
* means all registers are free to be used by machines that needs them for
* other purposes.
*
* ######################################################################
*
* mmu_print
* ---------
* This algorithm will print out the page tables of the system as
* appropriate for an 030 or an 040. This is useful for debugging purposes
* and as such is enclosed in #ifdef MMU_PRINT/#endif clauses.
*
* ######################################################################
*
* console_init
* ------------
* The console is also able to be turned off. The console in head.S
* is specifically for debugging and can be very useful. It is surrounded by
* #ifdef / #endif clauses so it doesn't have to ship in known-good
* kernels. Its basic algorithm is to determine the size of the screen
* (in height/width and bit depth) and then use that information for
* displaying an 8x8 font or an 8x16 (widthxheight). I prefer the 8x8 for
* debugging so I can see more good data. But it was trivial to add support
* for both fonts, so I included it.
* Also, the algorithm for plotting pixels is abstracted so that in
* theory other platforms could add support for different kinds of frame
* buffers. This could be very useful.
*
* console_put_penguin
* -------------------
* An important part of any Linux bring up is the penguin and there's
* nothing like getting the Penguin on the screen! This algorithm will work
* on any machine for which there is a console_plot_pixel.
*
* console_scroll
* --------------
* My hope is that the scroll algorithm does the right thing on the
* various platforms, but it wouldn't be hard to add the test conditions
* and new code if it doesn't.
*
* console_putc
* -------------
*
* ######################################################################
*
* Register usage has greatly simplified within head.S. Every subroutine
* saves and restores all registers that it modifies (except it returns a
* value in there of course). So the only register that needs to be initialized
* is the stack pointer.
* All other init code and data is now placed in the init section, so it will
* be automatically freed at the end of the kernel initialization.
*
* ######################################################################
*
* options
* -------
* There are many options available in a build of this file. I've
* taken the time to describe them here to save you the time of searching
* for them and trying to understand what they mean.
*
* CONFIG_xxx: These are the obvious machine configuration defines created
* during configuration. These are defined in autoconf.h.
*
* CONSOLE_DEBUG: Only supports a Mac frame buffer but could easily be
* extended to support other platforms.
*
* TEST_MMU: This is a test harness for running on any given machine but
* getting an MMU dump for another class of machine. The classes of machines
* that can be tested are any of the makes (Atari, Amiga, Mac, VME, etc.)
* and any of the models (030, 040, 060, etc.).
*
* NOTE: TEST_MMU is NOT permanent! It is scheduled to be removed
* When head.S boots on Atari, Amiga, Macintosh, and VME
* machines. At that point the underlying logic will be
* believed to be solid enough to be trusted, and TEST_MMU
* can be dropped. Do note that that will clean up the
* head.S code significantly as large blocks of #if/#else
* clauses can be removed.
*
* MMU_NOCACHE_KERNEL: On the Macintosh platform there was an inquiry into
* determining why devices don't appear to work. A test case was to remove
* the cacheability of the kernel bits.
*
* MMU_PRINT: There is a routine built into head.S that can display the
* MMU data structures. It outputs its result through the serial_putc
* interface. So where ever that winds up driving data, that's where the
* mmu struct will appear.
*
* SERIAL_DEBUG: There are a series of putc() macro statements
* scattered through out the code to give progress of status to the
* person sitting at the console. This constant determines whether those
* are used.
*
* DEBUG: This is the standard DEBUG flag that can be set for building
* the kernel. It has the effect adding additional tests into
* the code.
*
* FONT_6x11:
* FONT_8x8:
* FONT_8x16:
* In theory these could be determined at run time or handed
* over by the booter. But, let's be real, it's a fine hard
* coded value. (But, you will notice the code is run-time
* flexible!) A pointer to the font's struct font_desc
* is kept locally in Lconsole_font. It is used to determine
* font size information dynamically.
*
* Atari constants:
* USE_PRINTER: Use the printer port for serial debug.
* USE_SCC_B: Use the SCC port A (Serial2) for serial debug.
* USE_SCC_A: Use the SCC port B (Modem2) for serial debug.
* USE_MFP: Use the ST-MFP port (Modem1) for serial debug.
*
* Macintosh constants:
* MAC_USE_SCC_A: Use SCC port A (modem) for serial debug.
* MAC_USE_SCC_B: Use SCC port B (printer) for serial debug.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/bootinfo.h>
#include <asm/bootinfo-amiga.h>
#include <asm/bootinfo-atari.h>
#include <asm/bootinfo-hp300.h>
#include <asm/bootinfo-mac.h>
#include <asm/bootinfo-q40.h>
#include <asm/bootinfo-virt.h>
#include <asm/bootinfo-vme.h>
#include <asm/setup.h>
#include <asm/entry.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_MAC
# include <asm/machw.h>
#endif
#ifdef CONFIG_EARLY_PRINTK
# define SERIAL_DEBUG
# if defined(CONFIG_MAC) && defined(CONFIG_FONT_SUPPORT)
# define CONSOLE_DEBUG
# endif
#endif
#undef MMU_PRINT
#undef MMU_NOCACHE_KERNEL
#undef DEBUG
/*
* For the head.S console, there are three supported fonts, 6x11, 8x16 and 8x8.
* The 8x8 font is harder to read but fits more on the screen.
*/
#define FONT_8x8 /* default */
/* #define FONT_8x16 */ /* 2nd choice */
/* #define FONT_6x11 */ /* 3rd choice */
.globl kernel_pg_dir
.globl availmem
.globl m68k_init_mapped_size
.globl m68k_pgtable_cachemode
.globl m68k_supervisor_cachemode
#ifdef CONFIG_MVME16x
.globl mvme_bdid
#endif
#ifdef CONFIG_Q40
.globl q40_mem_cptr
#endif
CPUTYPE_040 = 1 /* indicates an 040 */
CPUTYPE_060 = 2 /* indicates an 060 */
CPUTYPE_0460 = 3 /* if either above are set, this is set */
CPUTYPE_020 = 4 /* indicates an 020 */
/* Translation control register */
TC_ENABLE = 0x8000
TC_PAGE8K = 0x4000
TC_PAGE4K = 0x0000
/* Transparent translation registers */
TTR_ENABLE = 0x8000 /* enable transparent translation */
TTR_ANYMODE = 0x4000 /* user and kernel mode access */
TTR_KERNELMODE = 0x2000 /* only kernel mode access */
TTR_USERMODE = 0x0000 /* only user mode access */
TTR_CI = 0x0400 /* inhibit cache */
TTR_RW = 0x0200 /* read/write mode */
TTR_RWM = 0x0100 /* read/write mask */
TTR_FCB2 = 0x0040 /* function code base bit 2 */
TTR_FCB1 = 0x0020 /* function code base bit 1 */
TTR_FCB0 = 0x0010 /* function code base bit 0 */
TTR_FCM2 = 0x0004 /* function code mask bit 2 */
TTR_FCM1 = 0x0002 /* function code mask bit 1 */
TTR_FCM0 = 0x0001 /* function code mask bit 0 */
/* Cache Control registers */
CC6_ENABLE_D = 0x80000000 /* enable data cache (680[46]0) */
CC6_FREEZE_D = 0x40000000 /* freeze data cache (68060) */
CC6_ENABLE_SB = 0x20000000 /* enable store buffer (68060) */
CC6_PUSH_DPI = 0x10000000 /* disable CPUSH invalidation (68060) */
CC6_HALF_D = 0x08000000 /* half-cache mode for data cache (68060) */
CC6_ENABLE_B = 0x00800000 /* enable branch cache (68060) */
CC6_CLRA_B = 0x00400000 /* clear all entries in branch cache (68060) */
CC6_CLRU_B = 0x00200000 /* clear user entries in branch cache (68060) */
CC6_ENABLE_I = 0x00008000 /* enable instruction cache (680[46]0) */
CC6_FREEZE_I = 0x00004000 /* freeze instruction cache (68060) */
CC6_HALF_I = 0x00002000 /* half-cache mode for instruction cache (68060) */
CC3_ALLOC_WRITE = 0x00002000 /* write allocate mode(68030) */
CC3_ENABLE_DB = 0x00001000 /* enable data burst (68030) */
CC3_CLR_D = 0x00000800 /* clear data cache (68030) */
CC3_CLRE_D = 0x00000400 /* clear entry in data cache (68030) */
CC3_FREEZE_D = 0x00000200 /* freeze data cache (68030) */
CC3_ENABLE_D = 0x00000100 /* enable data cache (68030) */
CC3_ENABLE_IB = 0x00000010 /* enable instruction burst (68030) */
CC3_CLR_I = 0x00000008 /* clear instruction cache (68030) */
CC3_CLRE_I = 0x00000004 /* clear entry in instruction cache (68030) */
CC3_FREEZE_I = 0x00000002 /* freeze instruction cache (68030) */
CC3_ENABLE_I = 0x00000001 /* enable instruction cache (68030) */
/* Miscellaneous definitions */
PAGESIZE = 4096
PAGESHIFT = 12
ROOT_TABLE_SIZE = 128
PTR_TABLE_SIZE = 128
PAGE_TABLE_SIZE = 64
ROOT_INDEX_SHIFT = 25
PTR_INDEX_SHIFT = 18
PAGE_INDEX_SHIFT = 12
#ifdef DEBUG
/* When debugging use readable names for labels */
#ifdef __STDC__
#define L(name) .head.S.##name
#else
#define L(name) .head.S./**/name
#endif
#else
#ifdef __STDC__
#define L(name) .L##name
#else
#define L(name) .L/**/name
#endif
#endif
/* The __INITDATA stuff is a no-op when ftrace or kgdb are turned on */
#ifndef __INITDATA
#define __INITDATA .data
#define __FINIT .previous
#endif
/* Several macros to make the writing of subroutines easier:
* - func_start marks the beginning of the routine which setups the frame
* register and saves the registers, it also defines another macro
* to automatically restore the registers again.
* - func_return marks the end of the routine and simply calls the prepared
* macro to restore registers and jump back to the caller.
* - func_define generates another macro to automatically put arguments
* onto the stack call the subroutine and cleanup the stack again.
*/
/* Within subroutines these macros can be used to access the arguments
* on the stack. With STACK some allocated memory on the stack can be
* accessed and ARG0 points to the return address (used by mmu_engage).
*/
#define STACK %a6@(stackstart)
#define ARG0 %a6@(4)
#define ARG1 %a6@(8)
#define ARG2 %a6@(12)
#define ARG3 %a6@(16)
#define ARG4 %a6@(20)
/*
* func_start opens a subroutine (see the block comment above): it sets
* up the %a6 frame, reserves \stack bytes of locals, saves \saveregs,
* and defines a matching func_return_\name macro that undoes all of it.
*/
.macro func_start name,saveregs,stack=0
L(\name):
linkw %a6,#-\stack /* frame pointer + local space */
moveml \saveregs,%sp@- /* save the registers this routine modifies */
.set stackstart,-\stack /* offset used by the STACK accessor macro */
.macro func_return_\name
moveml %sp@+,\saveregs /* restore saved registers */
unlk %a6
rts
.endm
.endm
/* func_return expands the restore/return macro defined by func_start. */
.macro func_return name
func_return_\name
.endm
/* func_call invokes a local subroutine by its L() label. */
.macro func_call name
jbsr L(\name)
.endm
.macro move_stack nr,arg1,arg2,arg3,arg4
.if \nr
move_stack "(\nr-1)",\arg2,\arg3,\arg4
movel \arg1,%sp@-
.endif
.endm
.macro func_define name,nr=0
.macro \name arg1,arg2,arg3,arg4
move_stack \nr,\arg1,\arg2,\arg3,\arg4
func_call \name
.if \nr
lea %sp@(\nr*4),%sp
.endif
.endm
.endm
func_define mmu_map,4
func_define mmu_map_tt,4
func_define mmu_fixup_page_mmu_cache,1
func_define mmu_temp_map,2
func_define mmu_engage
func_define mmu_get_root_table_entry,1
func_define mmu_get_ptr_table_entry,2
func_define mmu_get_page_table_entry,2
func_define mmu_print
func_define get_new_page
#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
func_define set_leds
#endif
.macro mmu_map_eq arg1,arg2,arg3
mmu_map \arg1,\arg1,\arg2,\arg3
.endm
.macro get_bi_record record
pea \record
func_call get_bi_record
addql #4,%sp
.endm
func_define serial_putc,1
func_define console_putc,1
func_define console_init
func_define console_put_penguin
func_define console_plot_pixel,3
func_define console_scroll
/* putc: emit one character to the console and/or serial port, depending
 * on which debug channels were compiled in. Compiles to nothing when
 * neither CONSOLE_DEBUG nor SERIAL_DEBUG is defined. */
.macro putc ch
#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG)
pea \ch
#endif
#ifdef CONSOLE_DEBUG
func_call console_putc
#endif
#ifdef SERIAL_DEBUG
func_call serial_putc
#endif
#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG)
addql #4,%sp
#endif
.endm
/* dputc: like putc, but only in DEBUG builds. */
.macro dputc ch
#ifdef DEBUG
putc \ch
#endif
.endm
/* putn prints a number (one stacked argument). */
func_define putn,1
/* dputn: like putn, but only in DEBUG builds. */
.macro dputn nr
#ifdef DEBUG
putn \nr
#endif
.endm
/* puts: emit a NUL-terminated string. The literal is placed in initdata
 * (via __INITDATA/__FINIT) and addressed pc-relative, since we may still
 * be running at the physical load address before the MMU is on. */
.macro puts string
#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG)
__INITDATA
.Lstr\@:
.string "\string"
__FINIT
pea %pc@(.Lstr\@)
func_call puts
addql #4,%sp
#endif
.endm
/* dputs: like puts, but only in DEBUG builds. */
.macro dputs string
#ifdef DEBUG
puts "\string"
#endif
.endm
/* Machine-type tests: compare the recorded m68k_machtype (pc-relative,
 * so they work before the MMU is engaged) and branch to `lab` on a
 * mismatch (is_not_*) or a match (is_*). */
#define is_not_amiga(lab) cmpl &MACH_AMIGA,%pc@(m68k_machtype); jne lab
#define is_not_atari(lab) cmpl &MACH_ATARI,%pc@(m68k_machtype); jne lab
#define is_not_mac(lab) cmpl &MACH_MAC,%pc@(m68k_machtype); jne lab
#define is_not_mvme147(lab) cmpl &MACH_MVME147,%pc@(m68k_machtype); jne lab
#define is_not_mvme16x(lab) cmpl &MACH_MVME16x,%pc@(m68k_machtype); jne lab
#define is_not_bvme6000(lab) cmpl &MACH_BVME6000,%pc@(m68k_machtype); jne lab
#define is_mvme147(lab) cmpl &MACH_MVME147,%pc@(m68k_machtype); jeq lab
#define is_mvme16x(lab) cmpl &MACH_MVME16x,%pc@(m68k_machtype); jeq lab
#define is_bvme6000(lab) cmpl &MACH_BVME6000,%pc@(m68k_machtype); jeq lab
#define is_not_hp300(lab) cmpl &MACH_HP300,%pc@(m68k_machtype); jne lab
#define is_not_apollo(lab) cmpl &MACH_APOLLO,%pc@(m68k_machtype); jne lab
#define is_not_q40(lab) cmpl &MACH_Q40,%pc@(m68k_machtype); jne lab
#define is_not_sun3x(lab) cmpl &MACH_SUN3X,%pc@(m68k_machtype); jne lab
#define is_not_virt(lab) cmpl &MACH_VIRT,%pc@(m68k_machtype); jne lab
/* hasnt_leds: branch to `lab` unless running on HP300 or Apollo, the two
 * platforms with debug LEDs. Uses the local numeric label 42. */
#define hasnt_leds(lab) cmpl &MACH_HP300,%pc@(m68k_machtype); \
jeq 42f; \
cmpl &MACH_APOLLO,%pc@(m68k_machtype); \
jne lab ;\
42:\
/* CPU-type tests: btst on the low byte of L(cputype), using the bit
 * numbers assigned during CPU detection in __start below. */
#define is_040_or_060(lab) btst &CPUTYPE_0460,%pc@(L(cputype)+3); jne lab
#define is_not_040_or_060(lab) btst &CPUTYPE_0460,%pc@(L(cputype)+3); jeq lab
#define is_040(lab) btst &CPUTYPE_040,%pc@(L(cputype)+3); jne lab
#define is_060(lab) btst &CPUTYPE_060,%pc@(L(cputype)+3); jne lab
#define is_not_060(lab) btst &CPUTYPE_060,%pc@(L(cputype)+3); jeq lab
#define is_020(lab) btst &CPUTYPE_020,%pc@(L(cputype)+3); jne lab
#define is_not_020(lab) btst &CPUTYPE_020,%pc@(L(cputype)+3); jeq lab
/* On the HP300 we use the on-board LEDs for debug output before
   the console is running. Writing a 1 bit turns the corresponding LED
   _off_ - on the 340 bit 7 is towards the back panel of the machine. */
/* leds: show \mask on the debug LEDs; a no-op on machines without them
 * (hasnt_leds skips the call). */
.macro leds mask
#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
hasnt_leds(.Lled\@)
pea \mask
func_call set_leds
addql #4,%sp
.Lled\@:
#endif
.endm
__HEAD
ENTRY(_stext)
/*
 * Version numbers of the bootinfo interface
 * The area from _stext to _start will later be used as kernel pointer table
 */
bras 1f /* Jump over bootinfo version numbers */
.long BOOTINFOV_MAGIC
.long MACH_AMIGA, AMIGA_BOOTI_VERSION
.long MACH_ATARI, ATARI_BOOTI_VERSION
.long MACH_MVME147, MVME147_BOOTI_VERSION
.long MACH_MVME16x, MVME16x_BOOTI_VERSION
.long MACH_BVME6000, BVME6000_BOOTI_VERSION
.long MACH_MAC, MAC_BOOTI_VERSION
.long MACH_Q40, Q40_BOOTI_VERSION
.long MACH_HP300, HP300_BOOTI_VERSION
.long 0
1: jra __start
/* The kernel root page directory is aliased onto the _stext..PAGESIZE
 * area; the location counter is then forced one page past _stext so
 * _start lands on the following page. */
.equ kernel_pg_dir,_stext
.equ .,_stext+PAGESIZE
ENTRY(_start)
jra __start
__INIT
ENTRY(__start)
/*
 * Setup initial stack pointer
 */
/* The stack grows down from _stext (the area below the kernel image). */
lea %pc@(_stext),%sp
/*
 * Record the CPU and machine type.
 */
/* get_bi_record returns the tag's data pointer in %a0; copy each value
 * into its kernel variable (pc-relative — MMU is not on yet). */
get_bi_record BI_MACHTYPE
lea %pc@(m68k_machtype),%a1
movel %a0@,%a1@
get_bi_record BI_FPUTYPE
lea %pc@(m68k_fputype),%a1
movel %a0@,%a1@
get_bi_record BI_MMUTYPE
lea %pc@(m68k_mmutype),%a1
movel %a0@,%a1@
get_bi_record BI_CPUTYPE
lea %pc@(m68k_cputype),%a1
movel %a0@,%a1@
leds 0x1
#ifdef CONFIG_MAC
/*
 * For Macintosh, we need to determine the display parameters early (at least
 * while debugging it).
 */
is_not_mac(L(test_notmac))
get_bi_record BI_MAC_VADDR
lea %pc@(L(mac_videobase)),%a1
movel %a0@,%a1@
get_bi_record BI_MAC_VDEPTH
lea %pc@(L(mac_videodepth)),%a1
movel %a0@,%a1@
get_bi_record BI_MAC_VDIM
lea %pc@(L(mac_dimensions)),%a1
movel %a0@,%a1@
get_bi_record BI_MAC_VROW
lea %pc@(L(mac_rowbytes)),%a1
movel %a0@,%a1@
get_bi_record BI_MAC_SCCBASE
lea %pc@(L(mac_sccbase)),%a1
movel %a0@,%a1@
L(test_notmac):
#endif /* CONFIG_MAC */
#ifdef CONFIG_VIRT
is_not_virt(L(test_notvirt))
/* Record the goldfish TTY base for the virtual (qemu virt) machine. */
get_bi_record BI_VIRT_GF_TTY_BASE
lea %pc@(L(virt_gf_tty_base)),%a1
movel %a0@,%a1@
L(test_notvirt):
#endif /* CONFIG_VIRT */
/*
 * There are ultimately two pieces of information we want for all kinds of
 * processors CpuType and CacheBits. The CPUTYPE was passed in from booter
 * and is converted here from a booter type definition to a separate bit
 * number which allows for the standard is_0x0 macro tests.
 */
movel %pc@(m68k_cputype),%d0
/*
 * Assume it's an 030
 */
/* %d1 accumulates the CPUTYPE_* bit mask; 030 is the all-clear default. */
clrl %d1
/*
 * Test the BootInfo cputype for 060
 */
btst #CPUB_68060,%d0
jeq 1f
bset #CPUTYPE_060,%d1
bset #CPUTYPE_0460,%d1
jra 3f
1:
/*
 * Test the BootInfo cputype for 040
 */
btst #CPUB_68040,%d0
jeq 2f
bset #CPUTYPE_040,%d1
bset #CPUTYPE_0460,%d1
jra 3f
2:
/*
 * Test the BootInfo cputype for 020
 */
btst #CPUB_68020,%d0
jeq 3f
bset #CPUTYPE_020,%d1
jra 3f
3:
/*
 * Record the cpu type
 */
lea %pc@(L(cputype)),%a0
movel %d1,%a0@
/*
 * NOTE:
 *
 * Now the macros are valid:
 * is_040_or_060
 * is_not_040_or_060
 * is_040
 * is_060
 * is_not_060
 */
/*
 * Determine the cache mode for pages holding MMU tables
 * and for supervisor mode, unused for '020 and '030
 */
/* %d0 = supervisor cache mode, %d1 = page-table cache mode. */
clrl %d0
clrl %d1
is_not_040_or_060(L(save_cachetype))
/*
 * '040 or '060
 * d1 := cacheable write-through
 * NOTE: The 68040 manual strongly recommends non-cached for MMU tables,
 * but we have been using write-through since at least 2.0.29 so I
 * guess it is OK.
 */
#ifdef CONFIG_060_WRITETHROUGH
/*
 * If this is a 68060 board using drivers with cache coherency
 * problems, then supervisor memory accesses need to be write-through
 * also; otherwise, we want copyback.
 */
is_not_060(1f)
movel #_PAGE_CACHE040W,%d0
jra L(save_cachetype)
#endif /* CONFIG_060_WRITETHROUGH */
1:
/* movew is sufficient here: %d0 was cleared above, so the upper word of
 * the cache-mode value is already zero. */
movew #_PAGE_CACHE040,%d0
movel #_PAGE_CACHE040W,%d1
L(save_cachetype):
/* Save cache mode for supervisor mode and page tables
 */
lea %pc@(m68k_supervisor_cachemode),%a0
movel %d0,%a0@
lea %pc@(m68k_pgtable_cachemode),%a0
movel %d1,%a0@
/*
 * raise interrupt level
 */
/* Supervisor mode, all interrupts masked, while we reprogram the world. */
movew #0x2700,%sr
/*
 If running on an Atari, determine the I/O base of the
 serial port and test if we are running on a Medusa or Hades.
 This test is necessary here, because on the Hades the serial
 port is only accessible in the high I/O memory area.
 The test whether it is a Medusa is done by writing to the byte at
 phys. 0x0. This should result in a bus error on all other machines.
 ...should, but doesn't. The Afterburner040 for the Falcon has the
 same behaviour (0x0..0x7 are no ROM shadow). So we have to do
 another test to distinguish Medusa and AB040. This is a
 read attempt for 0x00ff82fe phys. that should bus error on a Falcon
 (+AB040), but is in the range where the Medusa always asserts DTACK.
 The test for the Hades is done by reading address 0xb0000000. This
 should give a bus error on the Medusa.
 */
#ifdef CONFIG_ATARI
is_not_atari(L(notypetest))
/* get special machine type (Medusa/Hades/AB40) */
moveq #0,%d3 /* default if tag doesn't exist */
get_bi_record BI_ATARI_MCH_TYPE
tstl %d0
jbmi 1f
movel %a0@,%d3
lea %pc@(atari_mch_type),%a0
movel %d3,%a0@
1:
/* On the Hades, the iobase must be set up before opening the
 * serial port. There are no I/O regs at 0x00ffxxxx at all. */
moveq #0,%d0
cmpl #ATARI_MACH_HADES,%d3
jbne 1f
movel #0xff000000,%d0 /* Hades I/O base addr: 0xff000000 */
1: lea %pc@(L(iobase)),%a0
movel %d0,%a0@
L(notypetest):
#endif
#ifdef CONFIG_VME
is_mvme147(L(getvmetype))
is_bvme6000(L(getvmetype))
is_not_mvme16x(L(gvtdone))
/* See if the loader has specified the BI_VME_TYPE tag. Recent
 * versions of VMELILO and TFTPLILO do this. We have to do this
 * early so we know how to handle console output. If the tag
 * doesn't exist then we use the Bug for output on MVME16x.
 */
L(getvmetype):
get_bi_record BI_VME_TYPE
tstl %d0
jbmi 1f
movel %a0@,%d3
lea %pc@(vme_brdtype),%a0
movel %d3,%a0@
1:
#ifdef CONFIG_MVME16x
is_not_mvme16x(L(gvtdone))
/* Need to get the BRD_ID info to differentiate between 162, 167,
 * etc. This is available as a BI_VME_BRDINFO tag with later
 * versions of VMELILO and TFTPLILO, otherwise we call the Bug.
 */
get_bi_record BI_VME_BRDINFO
tstl %d0
jpl 1f
/* Get pointer to board ID data from Bug */
movel %d2,%sp@-
trap #15
.word 0x70 /* trap 0x70 - .BRD_ID */
movel %sp@+,%a0
1:
lea %pc@(mvme_bdid),%a1
/* Structure is 32 bytes long */
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
#endif
L(gvtdone):
#endif
#ifdef CONFIG_HP300
is_not_hp300(L(nothp))
/* Get the address of the UART for serial debugging */
get_bi_record BI_HP300_UART_ADDR
tstl %d0
jbmi 1f
movel %a0@,%d3
lea %pc@(L(uartbase)),%a0
movel %d3,%a0@
get_bi_record BI_HP300_UART_SCODE
tstl %d0
jbmi 1f
movel %a0@,%d3
lea %pc@(L(uart_scode)),%a0
movel %d3,%a0@
1:
L(nothp):
#endif
/*
 * Initialize serial port
 */
jbsr L(serial_init)
/*
 * Initialize console
 */
#ifdef CONFIG_MAC
is_not_mac(L(nocon))
# ifdef CONSOLE_DEBUG
console_init
# ifdef CONFIG_LOGO
console_put_penguin
# endif /* CONFIG_LOGO */
# endif /* CONSOLE_DEBUG */
L(nocon):
#endif /* CONFIG_MAC */
/* Boot progress markers: 'A'..'K' are printed at each major stage so a
 * hang can be localised from the serial/console output. */
putc '\n'
putc 'A'
leds 0x2
dputn %pc@(L(cputype))
dputn %pc@(m68k_supervisor_cachemode)
dputn %pc@(m68k_pgtable_cachemode)
dputc '\n'
/*
 * Save physical start address of kernel
 */
/* phys_kernel_start = (load address of _stext) adjusted by PAGE_OFFSET,
 * computed from the current (physical) pc-relative address of _stext. */
lea %pc@(L(phys_kernel_start)),%a0
lea %pc@(_stext),%a1
subl #_stext,%a1
addl #PAGE_OFFSET,%a1
movel %a1,%a0@
putc 'B'
leds 0x4
/*
 * mmu_init
 *
 * This block of code does what's necessary to map in the various kinds
 * of machines for execution of Linux.
 * First map the first 4, 8, or 16 MB of kernel code & data
 */
/* Pick the initial mapping size: 16 MB, halved to 8 then 4 MB until it
 * no longer exceeds the first memory chunk's size (at %a0@(4)). */
get_bi_record BI_MEMCHUNK
movel %a0@(4),%d0
movel #16*1024*1024,%d1
cmpl %d0,%d1
jls 1f
lsrl #1,%d1
cmpl %d0,%d1
jls 1f
lsrl #1,%d1
1:
lea %pc@(m68k_init_mapped_size),%a0
movel %d1,%a0@
mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\
%pc@(m68k_supervisor_cachemode)
putc 'C'
#ifdef CONFIG_AMIGA
L(mmu_init_amiga):
is_not_amiga(L(mmu_init_not_amiga))
/*
 * mmu_init_amiga
 */
putc 'D'
is_not_040_or_060(1f)
/*
 * 040: Map the 16Meg range physical 0x0 up to logical 0x8000.0000
 */
mmu_map #0x80000000,#0,#0x01000000,#_PAGE_NOCACHE_S
/*
 * Map the Zorro III I/O space with transparent translation
 * for frame buffer memory etc.
 */
mmu_map_tt #1,#0x40000000,#0x20000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
1:
/*
 * 030: Map the 32Meg range physical 0x0 up to logical 0x8000.0000
 */
mmu_map #0x80000000,#0,#0x02000000,#_PAGE_NOCACHE030
mmu_map_tt #1,#0x40000000,#0x20000000,#_PAGE_NOCACHE030
jbra L(mmu_init_done)
L(mmu_init_not_amiga):
#endif
#ifdef CONFIG_ATARI
L(mmu_init_atari):
is_not_atari(L(mmu_init_not_atari))
putc 'E'
/* On the Atari, we map the I/O region (phys. 0x00ffxxxx) by mapping
 the last 16 MB of virtual address space to the first 16 MB (i.e.
 0xffxxxxxx -> 0x00xxxxxx). For this, an additional pointer table is
 needed. I/O ranges are marked non-cachable.
 For the Medusa it is better to map the I/O region transparently
 (i.e. 0xffxxxxxx -> 0xffxxxxxx), because some I/O registers are
 accessible only in the high area.
 On the Hades all I/O registers are only accessible in the high
 area.
 */
/* I/O base addr for non-Medusa, non-Hades: 0x00000000 */
moveq #0,%d0
movel %pc@(atari_mch_type),%d3
cmpl #ATARI_MACH_MEDUSA,%d3
jbeq 2f
cmpl #ATARI_MACH_HADES,%d3
jbne 1f
2: movel #0xff000000,%d0 /* Medusa/Hades base addr: 0xff000000 */
1: movel %d0,%d3
is_040_or_060(L(spata68040))
/* Map everything non-cacheable, though not all parts really
 * need to disable caches (crucial only for 0xff8000..0xffffff
 * (standard I/O) and 0xf00000..0xf3ffff (IDE)). The remainder
 * isn't really used, except for sometimes peeking into the
 * ROMs (mirror at phys. 0x0), so caching isn't necessary for
 * this. */
mmu_map #0xff000000,%d3,#0x01000000,#_PAGE_NOCACHE030
jbra L(mmu_init_done)
L(spata68040):
mmu_map #0xff000000,%d3,#0x01000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(mmu_init_not_atari):
#endif
#ifdef CONFIG_Q40
is_not_q40(L(notq40))
/*
 * add transparent mapping for 0xff00 0000 - 0xffff ffff
 * non-cached serialized etc..
 * this includes master chip, DAC, RTC and ISA ports
 * 0xfe000000-0xfeffffff is for screen and ROM
 */
putc 'Q'
mmu_map_tt #0,#0xfe000000,#0x01000000,#_PAGE_CACHE040W
mmu_map_tt #1,#0xff000000,#0x01000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(notq40):
#endif
#ifdef CONFIG_HP300
is_not_hp300(L(nothp300))
/* On the HP300, we map the ROM, INTIO and DIO regions (phys. 0x00xxxxxx)
 * by mapping 32MB (on 020/030) or 16 MB (on 040) from 0xf0xxxxxx -> 0x00xxxxxx).
 * The ROM mapping is needed because the LEDs are mapped there too.
 */
is_040(1f)
/*
 * 030: Map the 32Meg range physical 0x0 up to logical 0xf000.0000
 */
mmu_map #0xf0000000,#0,#0x02000000,#_PAGE_NOCACHE030
jbra L(mmu_init_done)
1:
/*
 * 040: Map the 16Meg range physical 0x0 up to logical 0xf000.0000
 */
mmu_map #0xf0000000,#0,#0x01000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(nothp300):
#endif /* CONFIG_HP300 */
#ifdef CONFIG_MVME147
is_not_mvme147(L(not147))
/*
 * On MVME147 we have already created kernel page tables for
 * 4MB of RAM at address 0, so now need to do a transparent
 * mapping of the top of memory space. Make it 0.5GByte for now,
 * so we can access on-board i/o areas.
 */
mmu_map_tt #1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE030
jbra L(mmu_init_done)
L(not147):
#endif /* CONFIG_MVME147 */
#ifdef CONFIG_MVME16x
is_not_mvme16x(L(not16x))
/*
 * On MVME16x we have already created kernel page tables for
 * 4MB of RAM at address 0, so now need to do a transparent
 * mapping of the top of memory space. Make it 0.5GByte for now.
 * Supervisor only access, so transparent mapping doesn't
 * clash with User code virtual address space.
 * this covers IO devices, PROM and SRAM. The PROM and SRAM
 * mapping is needed to allow 167Bug to run.
 * IO is in the range 0xfff00000 to 0xfffeffff.
 * PROM is 0xff800000->0xffbfffff and SRAM is
 * 0xffe00000->0xffe1ffff.
 */
mmu_map_tt #1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(not16x):
#endif /* CONFIG_MVME162 | CONFIG_MVME167 */
#ifdef CONFIG_BVME6000
is_not_bvme6000(L(not6000))
/*
 * On BVME6000 we have already created kernel page tables for
 * 4MB of RAM at address 0, so now need to do a transparent
 * mapping of the top of memory space. Make it 0.5GByte for now,
 * so we can access on-board i/o areas.
 * Supervisor only access, so transparent mapping doesn't
 * clash with User code virtual address space.
 */
mmu_map_tt #1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(not6000):
#endif /* CONFIG_BVME6000 */
/*
 * mmu_init_mac
 *
 * The Macintosh mappings are less clear.
 *
 * Even as of this writing, it is unclear how the
 * Macintosh mappings will be done. However, as
 * the first author of this code I'm proposing the
 * following model:
 *
 * Map the kernel (that's already done),
 * Map the I/O (on most machines that's the
 * 0x5000.0000 ... 0x5300.0000 range,
 * Map the video frame buffer using as few pages
 * as absolutely (this requirement mostly stems from
 * the fact that when the frame buffer is at
 * 0x0000.0000 then we know there is valid RAM just
 * above the screen that we don't want to waste!).
 *
 * By the way, if the frame buffer is at 0x0000.0000
 * then the Macintosh is known as an RBV based Mac.
 *
 * By the way 2, the code currently maps in a bunch of
 * regions. But I'd like to cut that out. (And move most
 * of the mappings up into the kernel proper ... or only
 * map what's necessary.)
 */
#ifdef CONFIG_MAC
L(mmu_init_mac):
is_not_mac(L(mmu_init_not_mac))
putc 'F'
/* %d3 = cache mode for I/O mappings, picked per CPU family. */
is_not_040_or_060(1f)
moveq #_PAGE_NOCACHE_S,%d3
jbra 2f
1:
moveq #_PAGE_NOCACHE030,%d3
2:
/*
 * Mac Note: screen address of logical 0xF000.0000 -> <screen physical>
 * we simply map the 4MB that contains the videomem
 */
movel #VIDEOMEMMASK,%d0
andl %pc@(L(mac_videobase)),%d0
mmu_map #VIDEOMEMBASE,%d0,#VIDEOMEMSIZE,%d3
/* ROM from 4000 0000 to 4200 0000 (only for mac_reset()) */
mmu_map_eq #0x40000000,#0x02000000,%d3
/* IO devices (incl. serial port) from 5000 0000 to 5300 0000 */
mmu_map_eq #0x50000000,#0x03000000,%d3
/* Nubus slot space (video at 0xF0000000, rom at 0xF0F80000) */
mmu_map_tt #1,#0xf8000000,#0x08000000,%d3
jbra L(mmu_init_done)
L(mmu_init_not_mac):
#endif
#ifdef CONFIG_SUN3X
is_not_sun3x(L(notsun3x))
/* oh, the pain.. We're gonna want the prom code after
 * starting the MMU, so we copy the mappings, translating
 * from 8k -> 4k pages as we go.
 */
/* copy maps from 0xfee00000 to 0xff000000 */
movel #0xfee00000, %d0
moveq #ROOT_INDEX_SHIFT, %d1
lsrl %d1,%d0
mmu_get_root_table_entry %d0
movel #0xfee00000, %d0
moveq #PTR_INDEX_SHIFT, %d1
lsrl %d1,%d0
andl #PTR_TABLE_SIZE-1, %d0
mmu_get_ptr_table_entry %a0,%d0
movel #0xfee00000, %d0
moveq #PAGE_INDEX_SHIFT, %d1
lsrl %d1,%d0
andl #PAGE_TABLE_SIZE-1, %d0
mmu_get_page_table_entry %a0,%d0
/* this is where the prom page table lives */
movel 0xfefe00d4, %a1
movel %a1@, %a1
/* Each 8k PROM page becomes two consecutive 4k kernel pages. */
movel #((0x200000 >> 13)-1), %d1
1:
movel %a1@+, %d3
movel %d3,%a0@+
addl #0x1000,%d3
movel %d3,%a0@+
dbra %d1,1b
/* setup tt1 for I/O */
mmu_map_tt #1,#0x40000000,#0x40000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(notsun3x):
#endif
#ifdef CONFIG_VIRT
is_not_virt(L(novirt))
mmu_map_tt #1,#0xFF000000,#0x01000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(novirt):
#endif
#ifdef CONFIG_APOLLO
is_not_apollo(L(notapollo))
putc 'P'
mmu_map #0x80000000,#0,#0x02000000,#_PAGE_NOCACHE030
L(notapollo):
jbra L(mmu_init_done)
#endif
putc 'G'
leds 0x8
/*
* mmu_fixup
*
* On the 040 class machines, all pages that are used for the
* mmu have to be fixed up. According to Motorola, pages holding mmu
* tables should be non-cacheable on a '040 and write-through on a
* '060. But analysis of the reasons for this, and practical
* experience, showed that write-through also works on a '040.
*
* Allocated memory so far goes from kernel_end to memory_start that
* is used for all kind of tables, for that the cache attributes
* are now fixed.
*/
L(mmu_fixup):
is_not_040_or_060(L(mmu_fixup_done))
#ifdef MMU_NOCACHE_KERNEL
jbra L(mmu_fixup_done)
#endif
/* first fix the page at the start of the kernel, that
* contains also kernel_pg_dir.
*/
movel %pc@(L(phys_kernel_start)),%d0
subl #PAGE_OFFSET,%d0
lea %pc@(_stext),%a0
subl %d0,%a0
mmu_fixup_page_mmu_cache %a0
movel %pc@(L(kernel_end)),%a0
subl %d0,%a0
movel %pc@(L(memory_start)),%a1
subl %d0,%a1
bra 2f
1:
mmu_fixup_page_mmu_cache %a0
addw #PAGESIZE,%a0
2:
cmpl %a0,%a1
jgt 1b
L(mmu_fixup_done):
#ifdef MMU_PRINT
mmu_print
#endif
/*
 * mmu_engage
 *
 * This chunk of code performs the gruesome task of engaging the MMU.
 * The reason it's gruesome is because when the MMU becomes engaged it
 * maps logical addresses to physical addresses. The Program Counter
 * register is then passed through the MMU before the next instruction
 * is fetched (the instruction following the engage MMU instruction).
 * This may mean one of two things:
 * 1. The Program Counter falls within the logical address space of
 * the kernel of which there are two sub-possibilities:
 * A. The PC maps to the correct instruction (logical PC == physical
 * code location), or
 * B. The PC does not map through and the processor will read some
 * data (or instruction) which is not the logically next instr.
 * As you can imagine, A is good and B is bad.
 * Alternatively,
 * 2. The Program Counter does not map through the MMU. The processor
 * will take a Bus Error.
 * Clearly, 2 is bad.
 * It doesn't take a wiz kid to figure you want 1.A.
 * This code creates that possibility.
 * There are two possible 1.A. states (we now ignore the other above states):
 * A. The kernel is located at physical memory addressed the same as
 * the logical memory for the kernel, i.e., 0x01000.
 * B. The kernel is located somewhere else. e.g., 0x0400.0000
 *
 * Under some conditions the Macintosh can look like A or B.
 * [A friend and I once noted that Apple hardware engineers should be
 * whacked twice each day: once when they show up at work (as in, Whack!,
 * "This is for the screwy hardware we know you're going to design today."),
 * and also at the end of the day (as in, Whack! "I don't know what
 * you designed today, but I'm sure it wasn't good."). -- rst]
 *
 * This code works on the following premise:
 * If the kernel start (%d5) is within the first 16 Meg of RAM,
 * then create a mapping for the kernel at logical 0x8000.0000 to
 * the physical location of the pc. And, create a transparent
 * translation register for the first 16 Meg. Then, after the MMU
 * is engaged, the PC can be moved up into the 0x8000.0000 range
 * and then the transparent translation can be turned off and then
 * the PC can jump to the correct logical location and it will be
 * home (finally). This is essentially the code that the Amiga used
 * to use. Now, it's generalized for all processors. Which means
 * that a fresh (but temporary) mapping has to be created. The mapping
 * is made in page 0 (an as of yet unused location -- except for the
 * stack!). This temporary mapping will only require 1 pointer table
 * and a single page table (it can map 256K).
 *
 * OK, alternatively, imagine that the Program Counter is not within
 * the first 16 Meg. Then, just use Transparent Translation registers
 * to do the right thing.
 *
 * Last, if _start is already at 0x01000, then there's nothing special
 * to do (in other words, in a degenerate case of the first case above,
 * do nothing).
 *
 * Let's do it.
 *
 *
 */
putc 'H'
mmu_engage
/*
 * After this point no new memory is allocated and
 * the start of available memory is stored in availmem.
 * (The bootmem allocator requires now the physical address.)
 */
/* Note: absolute (not pc-relative) addressing from here on — the MMU is
 * live and logical addresses are valid. */
movel L(memory_start),availmem
/* Post-engage fixups: the early-debug pointers recorded before the MMU
 * was switched on still hold physical addresses; rebase them to their
 * new virtual locations so putc/puts keep working. */
#ifdef CONFIG_AMIGA
is_not_amiga(1f)
/* fixup the Amiga custom register location before printing */
clrl L(custom)
1:
#endif
#ifdef CONFIG_ATARI
is_not_atari(1f)
/* fixup the Atari iobase register location before printing */
movel #0xff000000,L(iobase)
1:
#endif
#ifdef CONFIG_MAC
is_not_mac(1f)
movel #~VIDEOMEMMASK,%d0
andl L(mac_videobase),%d0
addl #VIDEOMEMBASE,%d0
movel %d0,L(mac_videobase)
#ifdef CONSOLE_DEBUG
movel %pc@(L(phys_kernel_start)),%d0
subl #PAGE_OFFSET,%d0
subl %d0,L(console_font)
subl %d0,L(console_font_data)
#endif
orl #0x50000000,L(mac_sccbase)
1:
#endif
#ifdef CONFIG_HP300
is_not_hp300(2f)
/*
 * Fix up the iobase register to point to the new location of the LEDs.
 */
movel #0xf0000000,L(iobase)
/*
 * Energise the FPU and caches.
 */
is_040(1f)
movel #0x60,0xf05f400c
jbra 2f
/*
 * 040: slightly different, apparently.
 */
1: movew #0,0xf05f400e
movew #0x64,0xf05f400e
2:
#endif
#ifdef CONFIG_SUN3X
is_not_sun3x(1f)
/* enable copro */
oriw #0x4000,0x61000000
1:
#endif
#ifdef CONFIG_APOLLO
is_not_apollo(1f)
/*
 * Fix up the iobase before printing
 */
movel #0x80000000,L(iobase)
1:
#endif
putc 'I'
leds 0x10
/*
 * Enable caches
 */
is_not_040_or_060(L(cache_not_680460))
L(cache680460):
.chip 68040
nop
/* Push and invalidate both caches before turning them on. */
cpusha %bc
nop
is_060(L(cache68060))
movel #CC6_ENABLE_D+CC6_ENABLE_I,%d0
/* MMU stuff works in copyback mode now, so enable the cache */
movec %d0,%cacr
jra L(cache_done)
L(cache68060):
/* 060 additionally enables the store buffer and branch cache. */
movel #CC6_ENABLE_D+CC6_ENABLE_I+CC6_ENABLE_SB+CC6_PUSH_DPI+CC6_ENABLE_B+CC6_CLRA_B,%d0
/* MMU stuff works in copyback mode now, so enable the cache */
movec %d0,%cacr
/* enable superscalar dispatch in PCR */
moveq #1,%d0
.chip 68060
movec %d0,%pcr
jbra L(cache_done)
L(cache_not_680460):
L(cache68030):
.chip 68030
movel #CC3_ENABLE_DB+CC3_CLR_D+CC3_ENABLE_D+CC3_ENABLE_IB+CC3_CLR_I+CC3_ENABLE_I,%d0
movec %d0,%cacr
jra L(cache_done)
.chip 68k
L(cache_done):
putc 'J'
/*
 * Setup initial stack pointer
 */
lea init_task,%curptr
lea init_thread_union+THREAD_SIZE,%sp
putc 'K'
subl %a6,%a6 /* clear a6 for gdb */
/*
 * The new 64bit printf support requires an early exception initialization.
 */
jbsr base_trap_init
/* jump to the kernel start */
putc '\n'
leds 0x55
jbsr start_kernel
/*
 * Find a tag record in the bootinfo structure
 * The bootinfo structure is located right after the kernel
 * Returns: d0: size (-1 if not found)
 * a0: data pointer (end-of-records if not found)
 */
/* In: ARG1 = tag id to search for. Walks the records at _end until a
 * zero tag (end marker) or a matching tag is found. */
func_start get_bi_record,%d1
movel ARG1,%d0
lea %pc@(_end),%a0
1: tstw %a0@(BIR_TAG)
jeq 3f
cmpw %a0@(BIR_TAG),%d0
jeq 2f
/* No match: skip to the next record by its size field. */
addw %a0@(BIR_SIZE),%a0
jra 1b
2: moveq #0,%d0
movew %a0@(BIR_SIZE),%d0
lea %a0@(BIR_DATA),%a0
jra 4f
3: moveq #-1,%d0
lea %a0@(BIR_SIZE),%a0
4:
func_return get_bi_record
/*
* MMU Initialization Begins Here
*
* The structure of the MMU tables on the 68k machines
* is thus:
* Root Table
* Logical addresses are translated through
* a hierarchical translation mechanism where the high-order
* seven bits of the logical address (LA) are used as an
* index into the "root table." Each entry in the root
* table has a bit which specifies if it's a valid pointer to a
* pointer table. Each entry defines a 32Meg range of memory.
* If an entry is invalid then that logical range of 32M is
* invalid and references to that range of memory (when the MMU
* is enabled) will fault. If the entry is valid, then it does
* one of two things. On 040/060 class machines, it points to
* a pointer table which then describes more finely the memory
* within that 32M range. On 020/030 class machines, a technique
* called "early terminating descriptors" are used. This technique
* allows an entire 32Meg to be described by a single entry in the
* root table. Thus, this entry in the root table, contains the
* physical address of the memory or I/O at the logical address
* which the entry represents and it also contains the necessary
* cache bits for this region.
*
* Pointer Tables
* Per the Root Table, there will be one or more
* pointer tables. Each pointer table defines a 32M range.
* Not all of the 32M range need be defined. Again, the next
* seven bits of the logical address are used as an index into
* the pointer table to point to page tables (if the pointer
* is valid). There will undoubtedly be more than one
* pointer table for the kernel because each pointer table
* defines a range of only 32M. Valid pointer table entries
* point to page tables, or are early terminating entries
* themselves.
*
* Page Tables
* Per the Pointer Tables, each page table entry points
* to the physical page in memory that supports the logical
* address that translates to the particular index.
*
* In short, the Logical Address gets translated as follows:
* bits 31..26 - index into the Root Table
* bits 25..18 - index into the Pointer Table
* bits 17..12 - index into the Page Table
* bits 11..0 - offset into a particular 4K page
*
* The algorithms which follow do one thing: they abstract
* the MMU hardware. For example, there are three kinds of
* cache settings that are relevant. Either, memory is
* being mapped in which case it is either Kernel Code (or
* the RamDisk) or it is MMU data. On the 030, the MMU data
* option also describes the kernel. Or, I/O is being mapped
* in which case it has its own kind of cache bits. There
* are constants which abstract these notions from the code that
* actually makes the call to map some range of memory.
*
*
*
*/
#ifdef MMU_PRINT
/*
 * mmu_print
 *
 * This algorithm will print out the current MMU mappings.
 *
 * Input:
 * %a5 points to the root table. Everything else is calculated
 * from this.
 */
/* Field offsets into L(mmu_print_data), used to coalesce contiguous
 * mappings into single printed ranges. */
#define mmu_next_valid 0
#define mmu_start_logical 4
#define mmu_next_logical 8
#define mmu_start_physical 12
#define mmu_next_physical 16
/* State values for mmu_next_valid. */
#define MMU_PRINT_INVALID -1
#define MMU_PRINT_VALID 1
#define MMU_PRINT_UNINITED 0
/* putZc(z,n): print z if the Z flag is clear, else n (flag decode). */
#define putZc(z,n) jbne 1f; putc z; jbra 2f; 1: putc n; 2:
/* mmu_print: dump the live translation tables (040/060 or 030 format)
 * to the debug console, coalescing contiguous ranges. Registers:
 * %a4 = current logical address, %d5 = next logical address,
 * %d0/%d1/%d2 = root/pointer/page table indices, %d6 = current entry. */
func_start mmu_print,%a0-%a6/%d0-%d7
movel %pc@(L(kernel_pgdir_ptr)),%a5
lea %pc@(L(mmu_print_data)),%a0
movel #MMU_PRINT_UNINITED,%a0@(mmu_next_valid)
is_not_040_or_060(mmu_030_print)
mmu_040_print:
puts "\nMMU040\n"
puts "rp:"
putn %a5
putc '\n'
#if 0
/*
 * The following #if/#endif block is a tight algorithm for dumping the 040
 * MMU Map in gory detail. It really isn't that practical unless the
 * MMU Map algorithm appears to go awry and you need to debug it at the
 * entry per entry level.
 */
movel #ROOT_TABLE_SIZE,%d5
#if 0
movel %a5@+,%d7 | Burn an entry to skip the kernel mappings,
subql #1,%d5 | they (might) work
#endif
1: tstl %d5
jbeq mmu_print_done
subq #1,%d5
movel %a5@+,%d7
btst #1,%d7
jbeq 1b
2: putn %d7
andil #0xFFFFFE00,%d7
movel %d7,%a4
movel #PTR_TABLE_SIZE,%d4
putc ' '
3: tstl %d4
jbeq 11f
subq #1,%d4
movel %a4@+,%d7
btst #1,%d7
jbeq 3b
4: putn %d7
andil #0xFFFFFF00,%d7
movel %d7,%a3
movel #PAGE_TABLE_SIZE,%d3
5: movel #8,%d2
6: tstl %d3
jbeq 31f
subq #1,%d3
movel %a3@+,%d6
btst #0,%d6
jbeq 6b
7: tstl %d2
jbeq 8f
subq #1,%d2
putc ' '
jbra 91f
8: putc '\n'
movel #8+1+8+1+1,%d2
9: putc ' '
dbra %d2,9b
movel #7,%d2
91: putn %d6
jbra 6b
31: putc '\n'
movel #8+1,%d2
32: putc ' '
dbra %d2,32b
jbra 3b
11: putc '\n'
jbra 1b
#endif /* MMU 040 Dumping code that's gory and detailed */
/* Compact 040 dump: walk root (128) x pointer (128) x page (64)
 * entries, printing coalesced logical->physical tuples. */
lea %pc@(kernel_pg_dir),%a5
movel %a5,%a0 /* a0 has the address of the root table ptr */
movel #0x00000000,%a4 /* logical address */
moveql #0,%d0
40:
/* Increment the logical address and preserve in d5 */
movel %a4,%d5
addil #PAGESIZE<<13,%d5
movel %a0@+,%d6
btst #1,%d6
jbne 41f
jbsr mmu_print_tuple_invalidate
jbra 48f
41:
movel #0,%d1
andil #0xfffffe00,%d6
movel %d6,%a1
42:
movel %a4,%d5
addil #PAGESIZE<<6,%d5
movel %a1@+,%d6
btst #1,%d6
jbne 43f
jbsr mmu_print_tuple_invalidate
jbra 47f
43:
movel #0,%d2
andil #0xffffff00,%d6
movel %d6,%a2
44:
movel %a4,%d5
addil #PAGESIZE,%d5
movel %a2@+,%d6
btst #0,%d6
jbne 45f
jbsr mmu_print_tuple_invalidate
jbra 46f
45:
moveml %d0-%d1,%sp@-
movel %a4,%d0
movel %d6,%d1
andil #0xfffff4e0,%d1
lea %pc@(mmu_040_print_flags),%a6
jbsr mmu_print_tuple
moveml %sp@+,%d0-%d1
46:
movel %d5,%a4
addq #1,%d2
cmpib #64,%d2
jbne 44b
47:
movel %d5,%a4
addq #1,%d1
cmpib #128,%d1
jbne 42b
48:
movel %d5,%a4 /* move to the next logical address */
addq #1,%d0
cmpib #128,%d0
jbne 40b
/* Also report the two data transparent-translation registers. */
.chip 68040
movec %dtt1,%d0
movel %d0,%d1
andiw #0x8000,%d1 /* is it valid ? */
jbeq 1f /* No, bail out */
movel %d0,%d1
andil #0xff000000,%d1 /* Get the address */
putn %d1
puts "=="
putn %d1
movel %d0,%d6
jbsr mmu_040_print_flags_tt
1:
movec %dtt0,%d0
movel %d0,%d1
andiw #0x8000,%d1 /* is it valid ? */
jbeq 1f /* No, bail out */
movel %d0,%d1
andil #0xff000000,%d1 /* Get the address */
putn %d1
puts "=="
putn %d1
movel %d0,%d6
jbsr mmu_040_print_flags_tt
1:
.chip 68k
jbra mmu_print_done
/* Decode and print 040 descriptor flag bits from %d6. */
mmu_040_print_flags:
btstl #10,%d6
putZc(' ','G') /* global bit */
btstl #7,%d6
putZc(' ','S') /* supervisor bit */
mmu_040_print_flags_tt:
btstl #6,%d6
jbne 3f
putc 'C'
btstl #5,%d6
putZc('w','c') /* write through or copy-back */
jbra 4f
3:
putc 'N'
btstl #5,%d6
putZc('s',' ') /* serialized non-cacheable, or non-cacheable */
4:
rts
/* Decode and print the 030 cache-inhibit bit from %d6. */
mmu_030_print_flags:
btstl #6,%d6
putZc('C','I') /* write through or copy-back */
rts
/* 030 table walk: same structure as the 040 dump, but with 030
 * descriptor layouts (and early-terminating descriptors possible at
 * the root and pointer levels). */
mmu_030_print:
puts "\nMMU030\n"
puts "\nrp:"
putn %a5
putc '\n'
movel %a5,%d0
andil #0xfffffff0,%d0
movel %d0,%a0
movel #0x00000000,%a4 /* logical address */
movel #0,%d0
30:
movel %a4,%d5
addil #PAGESIZE<<13,%d5
movel %a0@+,%d6
btst #1,%d6 /* is it a table ptr? */
jbne 31f /* yes */
btst #0,%d6 /* is it early terminating? */
jbeq 1f /* no */
jbsr mmu_030_print_helper
jbra 38f
1:
jbsr mmu_print_tuple_invalidate
jbra 38f
31:
movel #0,%d1
andil #0xfffffff0,%d6
movel %d6,%a1
32:
movel %a4,%d5
addil #PAGESIZE<<6,%d5
movel %a1@+,%d6
btst #1,%d6 /* is it a table ptr? */
jbne 33f /* yes */
btst #0,%d6 /* is it a page descriptor? */
jbeq 1f /* no */
jbsr mmu_030_print_helper
jbra 37f
1:
jbsr mmu_print_tuple_invalidate
jbra 37f
33:
movel #0,%d2
andil #0xfffffff0,%d6
movel %d6,%a2
34:
movel %a4,%d5
addil #PAGESIZE,%d5
movel %a2@+,%d6
btst #0,%d6
jbne 35f
jbsr mmu_print_tuple_invalidate
jbra 36f
35:
jbsr mmu_030_print_helper
36:
movel %d5,%a4
addq #1,%d2
cmpib #64,%d2
jbne 34b
37:
movel %d5,%a4
addq #1,%d1
cmpib #128,%d1
jbne 32b
38:
movel %d5,%a4 /* move to the next logical address */
addq #1,%d0
cmpib #128,%d0
jbne 30b
mmu_print_done:
puts "\n"
func_return mmu_print
/* mmu_030_print_helper: print the current logical (%a4) -> physical
 * (%d6) tuple with 030 flag decoding. */
mmu_030_print_helper:
moveml %d0-%d1,%sp@-
movel %a4,%d0
movel %d6,%d1
lea %pc@(mmu_030_print_flags),%a6
jbsr mmu_print_tuple
moveml %sp@+,%d0-%d1
rts
/* mmu_print_tuple_invalidate: note an invalid entry at %a4; print the
 * "##" marker only on the first invalid entry of a run. */
mmu_print_tuple_invalidate:
moveml %a0/%d7,%sp@-
lea %pc@(L(mmu_print_data)),%a0
tstl %a0@(mmu_next_valid)
jbmi mmu_print_tuple_invalidate_exit
movel #MMU_PRINT_INVALID,%a0@(mmu_next_valid)
putn %a4
puts "##\n"
mmu_print_tuple_invalidate_exit:
moveml %sp@+,%a0/%d7
rts
/* mmu_print_tuple: print logical %d0 -> physical %d1 (flag printer in
 * %a6), suppressing output while the mapping stays contiguous with the
 * previously printed one. */
mmu_print_tuple:
moveml %d0-%d7/%a0,%sp@-
lea %pc@(L(mmu_print_data)),%a0
tstl %a0@(mmu_next_valid)
jble mmu_print_tuple_print
cmpl %a0@(mmu_next_physical),%d1
jbeq mmu_print_tuple_increment
mmu_print_tuple_print:
putn %d0
puts "->"
putn %d1
movel %d1,%d6
jbsr %a6@
mmu_print_tuple_record:
movel #MMU_PRINT_VALID,%a0@(mmu_next_valid)
movel %d1,%a0@(mmu_next_physical)
mmu_print_tuple_increment:
/* Advance the expected next physical address by the span just covered. */
movel %d5,%d7
subl %a4,%d7
addl %d7,%a0@(mmu_next_physical)
mmu_print_tuple_exit:
moveml %sp@+,%d0-%d7/%a0
rts
/* mmu_print_machine_cpu_types: print a short machine/CPU banner. */
mmu_print_machine_cpu_types:
puts "machine: "
is_not_amiga(1f)
puts "amiga"
jbra 9f
1:
is_not_atari(2f)
puts "atari"
jbra 9f
2:
is_not_mac(3f)
puts "macintosh"
jbra 9f
3: puts "unknown"
9: putc '\n'
puts "cputype: 0"
is_not_060(1f)
putc '6'
jbra 9f
1:
is_not_040_or_060(2f)
putc '4'
jbra 9f
2: putc '3'
9: putc '0'
putc '\n'
rts
#endif /* MMU_PRINT */
/*
 * mmu_map_tt
 *
 * This is a specific function which works on all 680x0 machines.
 * On the 030, 040 and 060 it will attempt to use the Transparent
 * Translation registers (tt0/tt1).
 * On the 020 it will call the standard mmu_map, which will use early
 * terminating descriptors.
 */
/*
 * In:  ARG1 = which tt register pair (0 or 1), ARG2 = base address,
 *      ARG3 = size of the region, ARG4 = cache/attribute bits.
 * Falls back to mmu_map (early terminating descriptors) on the 020 or when
 * the region is too large for a tt register (top set bit above bit 7).
 */
func_start mmu_map_tt,%d0/%d1/%a0,4
dputs "mmu_map_tt:"
dputn ARG1
dputn ARG2
dputn ARG3
dputn ARG4
dputc '\n'
is_020(L(do_map))
/* Extract the highest bit set
*/
bfffo ARG3{#0,#32},%d1
cmpw #8,%d1
jcc L(do_map) /* region smaller than 16MB: tt regs can't express it */
/* And get the mask
*/
moveq #-1,%d0
lsrl %d1,%d0
lsrl #1,%d0
/* Mask the address
*/
movel %d0,%d1
notl %d1
andl ARG2,%d1
/* Generate the upper 16bit of the tt register
*/
lsrl #8,%d0
orl %d0,%d1
clrw %d1
is_040_or_060(L(mmu_map_tt_040))
/* set 030 specific bits (read/write access for supervisor mode
* (highest function code set, lower two bits masked))
*/
orw #TTR_ENABLE+TTR_RWM+TTR_FCB2+TTR_FCM1+TTR_FCM0,%d1
movel ARG4,%d0
btst #6,%d0 /* caller requested cache-inhibit? */
jeq 1f
orw #TTR_CI,%d1
1: lea STACK,%a0
dputn %d1
movel %d1,%a0@ /* pmove needs a memory operand */
.chip 68030
tstl ARG1
jne 1f
pmove %a0@,%tt0
jra 2f
1: pmove %a0@,%tt1
2: .chip 68k
jra L(mmu_map_tt_done)
/* set 040 specific bits
*/
L(mmu_map_tt_040):
orw #TTR_ENABLE+TTR_KERNELMODE,%d1
orl ARG4,%d1
dputn %d1
.chip 68040
tstl ARG1
jne 1f
movec %d1,%itt0 /* program both instruction and data tt regs */
movec %d1,%dtt0
jra 2f
1: movec %d1,%itt1
movec %d1,%dtt1
2: .chip 68k
jra L(mmu_map_tt_done)
L(do_map):
mmu_map_eq ARG2,ARG3,ARG4
L(mmu_map_tt_done):
func_return mmu_map_tt
/*
* mmu_map
*
* This routine will map a range of memory using a pointer
* table and allocate the pages on the fly from the kernel.
* The pointer table does not have to be already linked into
* the root table, this routine will do that if necessary.
*
* NOTE
* This routine will assert failure and use the serial_putc
* routines in the case of a run-time error. For example,
* if the address is already mapped.
*
* NOTE-2
* This routine will use early terminating descriptors
* where possible for the 68020+68851 and 68030 type
* processors.
*/
/*
 * In:  ARG1 = logical address, ARG2 = physical address,
 *      ARG3 = length, ARG4 = page attribute bits.
 * Register roles below: %a3 = current logical addr, %a2 = current physical
 * addr (+attributes), %a4 = last logical addr of the range.
 */
func_start mmu_map,%d0-%d4/%a0-%a4
dputs "\nmmu_map:"
dputn ARG1
dputn ARG2
dputn ARG3
dputn ARG4
dputc '\n'
/* Get logical address and round it down to 256KB
*/
movel ARG1,%d0
andl #-(PAGESIZE*PAGE_TABLE_SIZE),%d0
movel %d0,%a3
/* Get the end address
*/
movel ARG1,%a4
addl ARG3,%a4
subql #1,%a4
/* Get physical address and round it down to 256KB
*/
movel ARG2,%d0
andl #-(PAGESIZE*PAGE_TABLE_SIZE),%d0
movel %d0,%a2
/* Add page attributes to the physical address
*/
movel ARG4,%d0
orw #_PAGE_PRESENT+_PAGE_ACCESSED+_PAGE_DIRTY,%d0
addw %d0,%a2
dputn %a2
dputn %a3
dputn %a4
is_not_040_or_060(L(mmu_map_030))
addw #_PAGE_GLOBAL040,%a2
/*
* MMU 040 & 060 Support
*
* The MMU usage for the 040 and 060 is different enough from
* the 030 and 68851 that there is separate code. This comment
* block describes the data structures and algorithms built by
* this code.
*
* The 040 does not support early terminating descriptors, as
* the 030 does. Therefore, a third level of table is needed
* for the 040, and that would be the page table. In Linux,
* page tables are allocated directly from the memory above the
* kernel.
*
*/
L(mmu_map_040):
/* Calculate the offset into the root table
*/
movel %a3,%d0
moveq #ROOT_INDEX_SHIFT,%d1
lsrl %d1,%d0
mmu_get_root_table_entry %d0
/* Calculate the offset into the pointer table
*/
movel %a3,%d0
moveq #PTR_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PTR_TABLE_SIZE-1,%d0
mmu_get_ptr_table_entry %a0,%d0
/* Calculate the offset into the page table
*/
movel %a3,%d0
moveq #PAGE_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PAGE_TABLE_SIZE-1,%d0
mmu_get_page_table_entry %a0,%d0
/* The page table entry must not be busy (already mapped)
*/
tstl %a0@
jne L(mmu_map_error)
/* Do the mapping and advance the pointers
*/
movel %a2,%a0@
2:
addw #PAGESIZE,%a2
addw #PAGESIZE,%a3
/* Ready with mapping?
*/
lea %a3@(-1),%a0
cmpl %a0,%a4
jhi L(mmu_map_040)
jra L(mmu_map_done)
L(mmu_map_030):
/* Calculate the offset into the root table
*/
movel %a3,%d0
moveq #ROOT_INDEX_SHIFT,%d1
lsrl %d1,%d0
mmu_get_root_table_entry %d0
/* Check if logical address 32MB aligned,
* so we can try to map it once
*/
movel %a3,%d0
andl #(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE-1)&(-ROOT_TABLE_SIZE),%d0
jne 1f
/* Is there enough to map for 32MB at once
*/
lea %a3@(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE-1),%a1
cmpl %a1,%a4
jcs 1f
addql #1,%a1
/* The root table entry must not be busy (already mapped)
*/
tstl %a0@
jne L(mmu_map_error)
/* Do the mapping and advance the pointers
*/
dputs "early term1"
dputn %a2
dputn %a3
dputn %a1
dputc '\n'
movel %a2,%a0@ /* early terminating descriptor covers 32MB */
movel %a1,%a3
lea %a2@(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE),%a2
jra L(mmu_mapnext_030)
1:
/* Calculate the offset into the pointer table
*/
movel %a3,%d0
moveq #PTR_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PTR_TABLE_SIZE-1,%d0
mmu_get_ptr_table_entry %a0,%d0
/* The pointer table entry must not be busy (already mapped)
*/
tstl %a0@
jne L(mmu_map_error)
/* Do the mapping and advance the pointers
*/
dputs "early term2"
dputn %a2
dputn %a3
dputc '\n'
movel %a2,%a0@ /* early terminating descriptor covers 256KB */
addl #PAGE_TABLE_SIZE*PAGESIZE,%a2
addl #PAGE_TABLE_SIZE*PAGESIZE,%a3
L(mmu_mapnext_030):
/* Ready with mapping?
*/
lea %a3@(-1),%a0
cmpl %a0,%a4
jhi L(mmu_map_030)
jra L(mmu_map_done)
L(mmu_map_error):
dputs "mmu_map error:"
dputn %a2
dputn %a3
dputc '\n'
L(mmu_map_done):
func_return mmu_map
/*
* mmu_fixup
*
* On the 040 class machines, all pages that are used for the
* mmu have to be fixed up.
*/
/*
 * In:  ARG1 = logical address of a page used for MMU tables.
 * Walks the three-level 040 table for ARG1 and rewrites the page
 * descriptor's cache bits with m68k_pgtable_cachemode.
 */
func_start mmu_fixup_page_mmu_cache,%d0/%a0
dputs "mmu_fixup_page_mmu_cache"
dputn ARG1
/* Calculate the offset into the root table
*/
movel ARG1,%d0
moveq #ROOT_INDEX_SHIFT,%d1
lsrl %d1,%d0
mmu_get_root_table_entry %d0
/* Calculate the offset into the pointer table
*/
movel ARG1,%d0
moveq #PTR_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PTR_TABLE_SIZE-1,%d0
mmu_get_ptr_table_entry %a0,%d0
/* Calculate the offset into the page table
*/
movel ARG1,%d0
moveq #PAGE_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PAGE_TABLE_SIZE-1,%d0
mmu_get_page_table_entry %a0,%d0
movel %a0@,%d0
andil #_CACHEMASK040,%d0 /* strip old cache-mode bits */
orl %pc@(m68k_pgtable_cachemode),%d0 /* insert required pgtable cache mode */
movel %d0,%a0@
dputc '\n'
func_return mmu_fixup_page_mmu_cache
/*
 * mmu_temp_map
 *
 * Create a temporary mapping to enable the MMU; this way we don't
 * need any transparent translation tricks.
 */
/*
 * In:  ARG1 = physical address, ARG2 = logical address.
 * Builds (or reuses) temporary pointer/page tables bump-allocated from
 * L(temp_mmap_mem) and installs a single page mapping ARG2 -> ARG1.
 */
func_start mmu_temp_map,%d0/%d1/%a0/%a1
dputs "mmu_temp_map"
dputn ARG1
dputn ARG2
dputc '\n'
lea %pc@(L(temp_mmap_mem)),%a1 /* a1 -> temp table allocation cursor */
/* Calculate the offset in the root table
*/
movel ARG2,%d0
moveq #ROOT_INDEX_SHIFT,%d1
lsrl %d1,%d0
mmu_get_root_table_entry %d0
/* Check if the table is temporary allocated, so we have to reuse it
*/
movel %a0@,%d0
cmpl %pc@(L(memory_start)),%d0
jcc 1f /* entry already points into temp memory: reuse */
/* Temporary allocate a ptr table and insert it into the root table
*/
movel %a1@,%d0
addl #PTR_TABLE_SIZE*4,%a1@
orw #_PAGE_TABLE+_PAGE_ACCESSED,%d0
movel %d0,%a0@
dputs " (new)"
1:
dputn %d0
/* Mask the root table entry for the ptr table
*/
andw #-ROOT_TABLE_SIZE,%d0
movel %d0,%a0
/* Calculate the offset into the pointer table
*/
movel ARG2,%d0
moveq #PTR_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PTR_TABLE_SIZE-1,%d0
lea %a0@(%d0*4),%a0
dputn %a0
/* Check if a temporary page table is already allocated
*/
movel %a0@,%d0
jne 1f
/* Temporary allocate a page table and insert it into the ptr table
*/
movel %a1@,%d0
/* The 512 should be PAGE_TABLE_SIZE*4, but that violates the
alignment restriction for pointer tables on the '0[46]0. */
addl #512,%a1@
orw #_PAGE_TABLE+_PAGE_ACCESSED,%d0
movel %d0,%a0@
dputs " (new)"
1:
dputn %d0
/* Mask the ptr table entry for the page table
*/
andw #-PTR_TABLE_SIZE,%d0
movel %d0,%a0
/* Calculate the offset into the page table
*/
movel ARG2,%d0
moveq #PAGE_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PAGE_TABLE_SIZE-1,%d0
lea %a0@(%d0*4),%a0
dputn %a0
/* Insert the address into the page table
*/
movel ARG1,%d0
andw #-PAGESIZE,%d0
orw #_PAGE_PRESENT+_PAGE_ACCESSED+_PAGE_DIRTY,%d0
movel %d0,%a0@
dputn %d0
dputc '\n'
func_return mmu_temp_map
/*
 * mmu_engage
 *
 * Turn on the MMU.  Copies the kernel root table to a scratch copy at
 * memory_start, sets up temporary identity/virtual mappings for this code
 * (when phys != virt), enables translation (040/060: movec to %srp/%tc;
 * 030: pmove of a CRP and TC image), then switches to the real kernel root
 * table and relocates %fp, %sp and the saved return data by the
 * physical-virtual delta.
 */
func_start mmu_engage,%d0-%d2/%a0-%a3
moveq #ROOT_TABLE_SIZE-1,%d0
/* Temporarily use a different root table. */
lea %pc@(L(kernel_pgdir_ptr)),%a0
movel %a0@,%a2 /* a2 = real kernel root table */
movel %pc@(L(memory_start)),%a1
movel %a1,%a0@ /* scratch copy becomes the current root table */
movel %a2,%a0
1:
movel %a0@+,%a1@+ /* copy the root table to the scratch page */
dbra %d0,1b
lea %pc@(L(temp_mmap_mem)),%a0
movel %a1,%a0@ /* temp tables are allocated behind the copy */
movew #PAGESIZE-1,%d0
1:
clrl %a1@+ /* clear one page for the temp tables */
dbra %d0,1b
lea %pc@(1b),%a0 /* a0 = physical address of this code */
movel #1b,%a1 /* a1 = linked (virtual) address of this code */
/* Skip temp mappings if phys == virt */
cmpl %a0,%a1
jeq 1f
mmu_temp_map %a0,%a0 /* identity-map two pages around this code... */
mmu_temp_map %a0,%a1 /* ...and map them at the virtual address too */
addw #PAGESIZE,%a0
addw #PAGESIZE,%a1
mmu_temp_map %a0,%a0
mmu_temp_map %a0,%a1
1:
movel %pc@(L(memory_start)),%a3 /* a3 = scratch root table */
movel %pc@(L(phys_kernel_start)),%d2
is_not_040_or_060(L(mmu_engage_030))
L(mmu_engage_040):
.chip 68040
nop
cinva %bc /* invalidate both caches */
nop
pflusha /* flush the ATC */
nop
movec %a3,%srp
movel #TC_ENABLE+TC_PAGE4K,%d0
movec %d0,%tc /* enable the MMU */
jmp 1f:l /* absolute jump lands us at the virtual address */
1: nop
movec %a2,%srp /* switch to the real kernel root table */
nop
cinva %bc
nop
pflusha
.chip 68k
jra L(mmu_engage_cleanup)
L(mmu_engage_030_temp):
.space 12 /* scratch: CRP (8 bytes) + TC (4 bytes) for pmove */
L(mmu_engage_030):
.chip 68030
lea %pc@(L(mmu_engage_030_temp)),%a0
movel #0x80000002,%a0@ /* CRP: valid, 4-byte descriptors */
movel %a3,%a0@(4) /* CRP: scratch root table address */
movel #0x0808,%d0
movec %d0,%cacr /* clear both caches */
pmove %a0@,%srp
pflusha
/*
* enable,super root enable,4096 byte pages,7 bit root index,
* 7 bit pointer index, 6 bit page table index.
*/
movel #0x82c07760,%a0@(8)
pmove %a0@(8),%tc /* enable the MMU */
jmp 1f:l /* absolute jump lands us at the virtual address */
1: movel %a2,%a0@(4) /* switch to the real kernel root table */
movel #0x0808,%d0
movec %d0,%cacr
pmove %a0@,%srp
pflusha
.chip 68k
L(mmu_engage_cleanup):
subl #PAGE_OFFSET,%d2 /* d2 = physical - virtual delta */
subl %d2,%a2
movel %a2,L(kernel_pgdir_ptr) /* store virtual address of root table */
subl %d2,%fp /* relocate frame/stack pointers to virtual */
subl %d2,%sp
subl %d2,ARG0
func_return mmu_engage
/*
 * mmu_get_root_table_entry
 *
 * In:  ARG1 = root table index.
 * Out: %a0 -> root table entry.
 * On first use this also initializes the early allocator: it locates the
 * end of the bootinfo data, records memory_start/kernel_end, and sets up
 * the page at _stext as kernel_pg_dir (cleared), seeding the pointer-table
 * cache with the rest of that page.
 */
func_start mmu_get_root_table_entry,%d0/%a1
#if 0
dputs "mmu_get_root_table_entry:"
dputn ARG1
dputs " ="
#endif
movel %pc@(L(kernel_pgdir_ptr)),%a0
tstl %a0
jne 2f /* already initialized */
dputs "\nmmu_init:"
/* Find the start of free memory, get_bi_record does this for us,
* as the bootinfo structure is located directly behind the kernel
* we simply search for the last entry.
*/
get_bi_record BI_LAST
addw #PAGESIZE-1,%a0 /* round up to a page boundary */
movel %a0,%d0
andw #-PAGESIZE,%d0
dputn %d0
lea %pc@(L(memory_start)),%a0
movel %d0,%a0@
lea %pc@(L(kernel_end)),%a0
movel %d0,%a0@
/* we have to return the first page at _stext since the init code
* in mm/init.c simply expects kernel_pg_dir there, the rest of
* page is used for further ptr tables in get_ptr_table.
*/
lea %pc@(_stext),%a0
lea %pc@(L(mmu_cached_pointer_tables)),%a1
movel %a0,%a1@
addl #ROOT_TABLE_SIZE*4,%a1@ /* ptr tables start after the root table */
lea %pc@(L(mmu_num_pointer_tables)),%a1
addql #1,%a1@
/* clear the page
*/
movel %a0,%a1
movew #PAGESIZE/4-1,%d0
1:
clrl %a1@+
dbra %d0,1b
lea %pc@(L(kernel_pgdir_ptr)),%a1
movel %a0,%a1@
dputn %a0
dputc '\n'
2:
movel ARG1,%d0
lea %a0@(%d0*4),%a0 /* a0 = &root_table[ARG1] */
#if 0
dputn %a0
dputc '\n'
#endif
func_return mmu_get_root_table_entry
/*
 * mmu_get_ptr_table_entry
 *
 * In:  ARG1 = address of a root table entry, ARG2 = pointer table index.
 * Out: %a0 -> pointer table entry.
 * If the root entry is empty, a pointer table is taken from the cache
 * (8 pointer tables fit in one page; a new page is fetched every 8th
 * allocation) and linked into the root table.
 */
func_start mmu_get_ptr_table_entry,%d0/%a1
#if 0
dputs "mmu_get_ptr_table_entry:"
dputn ARG1
dputn ARG2
dputs " ="
#endif
movel ARG1,%a0
movel %a0@,%d0
jne 2f /* root entry already points to a ptr table */
/* Keep track of the number of pointer tables we use
*/
dputs "\nmmu_get_new_ptr_table:"
lea %pc@(L(mmu_num_pointer_tables)),%a0
movel %a0@,%d0
addql #1,%a0@
/* See if there is a free pointer table in our cache of pointer tables
*/
lea %pc@(L(mmu_cached_pointer_tables)),%a1
andw #7,%d0 /* 8 ptr tables per page */
jne 1f
/* Get a new pointer table page from above the kernel memory
*/
get_new_page
movel %a0,%a1@
1:
/* There is an unused pointer table in our cache... use it
*/
movel %a1@,%d0
addl #PTR_TABLE_SIZE*4,%a1@
dputn %d0
dputc '\n'
/* Insert the new pointer table into the root table
*/
movel ARG1,%a0
orw #_PAGE_TABLE+_PAGE_ACCESSED,%d0
movel %d0,%a0@
2:
/* Extract the pointer table entry
*/
andw #-PTR_TABLE_SIZE,%d0 /* strip descriptor flag bits */
movel %d0,%a0
movel ARG2,%d0
lea %a0@(%d0*4),%a0 /* a0 = &ptr_table[ARG2] */
#if 0
dputn %a0
dputc '\n'
#endif
func_return mmu_get_ptr_table_entry
/*
 * mmu_get_page_table_entry
 *
 * In:  ARG1 = address of a pointer table entry, ARG2 = page table index.
 * Out: %a0 -> page table entry.
 * If the pointer entry is empty, a whole page of page tables is allocated
 * and linked into all the pointer entries it covers (4MB of address space).
 */
func_start mmu_get_page_table_entry,%d0/%a1
#if 0
dputs "mmu_get_page_table_entry:"
dputn ARG1
dputn ARG2
dputs " ="
#endif
movel ARG1,%a0
movel %a0@,%d0
jne 2f /* pointer entry already points to a page table */
/* If the page table entry doesn't exist, we allocate a complete new
* page and use it as one continuous big page table which can cover
* 4MB of memory, nearly all mappings have that alignment.
*/
get_new_page
addw #_PAGE_TABLE+_PAGE_ACCESSED,%a0
/* align pointer table entry for a page of page tables
*/
movel ARG1,%d0
andw #-(PAGESIZE/PAGE_TABLE_SIZE),%d0
movel %d0,%a1
/* Insert the page tables into the pointer entries
*/
moveq #PAGESIZE/PAGE_TABLE_SIZE/4-1,%d0
1:
movel %a0,%a1@+
lea %a0@(PAGE_TABLE_SIZE*4),%a0
dbra %d0,1b
/* Now we can get the initialized pointer table entry
*/
movel ARG1,%a0
movel %a0@,%d0
2:
/* Extract the page table entry
*/
andw #-PAGE_TABLE_SIZE,%d0 /* strip descriptor flag bits */
movel %d0,%a0
movel ARG2,%d0
lea %a0@(%d0*4),%a0 /* a0 = &page_table[ARG2] */
#if 0
dputn %a0
dputc '\n'
#endif
func_return mmu_get_page_table_entry
/*
* get_new_page
*
* Return a new page from the memory start and clear it.
*/
/*
 * Out: %a0 = address of a freshly cleared page.
 * Bump-allocates from L(memory_start) (advanced by PAGESIZE).
 */
func_start get_new_page,%d0/%a1
dputs "\nget_new_page:"
/* allocate the page and adjust memory_start
*/
lea %pc@(L(memory_start)),%a0
movel %a0@,%a1
addl #PAGESIZE,%a0@
/* clear the new page
*/
movel %a1,%a0 /* a0 = return value (page start) */
movew #PAGESIZE/4-1,%d0
1:
clrl %a1@+
dbra %d0,1b
dputn %a0
dputc '\n'
func_return get_new_page
/*
* Debug output support
* Atarians have a choice between the parallel port, the serial port
* from the MFP or a serial port of the SCC
*/
#ifdef CONFIG_MAC
/* You may define either or both of these. */
#define MAC_USE_SCC_A /* Modem port */
#define MAC_USE_SCC_B /* Printer port */
#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
/* Initialisation table for SCC with 3.6864 MHz PCLK */
/* Format: pairs of (SCC write-register number, value); -1 terminates. */
L(scc_initable_mac):
.byte 4,0x44 /* x16, 1 stopbit, no parity */
.byte 3,0xc0 /* receiver: 8 bpc */
.byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */
.byte 10,0 /* NRZ */
.byte 11,0x50 /* use baud rate generator */
.byte 12,1,13,0 /* 38400 baud */
.byte 14,1 /* Baud rate generator enable */
.byte 3,0xc1 /* enable receiver */
.byte 5,0xea /* enable transmitter */
.byte -1
.even
#endif
#endif /* CONFIG_MAC */
#ifdef CONFIG_ATARI
/* Select exactly one Atari debug output channel below. */
/* #define USE_PRINTER */
/* #define USE_SCC_B */
/* #define USE_SCC_A */
#define USE_MFP
#if defined(USE_SCC_A) || defined(USE_SCC_B)
/* Initialisation table for SCC with 7.9872 MHz PCLK */
/* PCLK == 8.0539 gives baud == 9680.1 */
/* Format: pairs of (SCC write-register number, value); -1 terminates. */
L(scc_initable_atari):
.byte 4,0x44 /* x16, 1 stopbit, no parity */
.byte 3,0xc0 /* receiver: 8 bpc */
.byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */
.byte 10,0 /* NRZ */
.byte 11,0x50 /* use baud rate generator */
.byte 12,24,13,0 /* 9600 baud */
.byte 14,2,14,3 /* use master clock for BRG, enable */
.byte 3,0xc1 /* enable receiver */
.byte 5,0xea /* enable transmitter */
.byte -1
.even
#endif
#ifdef USE_PRINTER
/* PSG (sound chip) and ST-MFP registers used to drive the printer port */
LPSG_SELECT = 0xff8800
LPSG_READ = 0xff8800
LPSG_WRITE = 0xff8802
LPSG_IO_A = 14
LPSG_IO_B = 15
LPSG_CONTROL = 7
LSTMFP_GPIP = 0xfffa01
LSTMFP_DDR = 0xfffa05
LSTMFP_IERB = 0xfffa09
#elif defined(USE_SCC_B)
LSCC_CTRL = 0xff8c85
LSCC_DATA = 0xff8c87
#elif defined(USE_SCC_A)
LSCC_CTRL = 0xff8c81
LSCC_DATA = 0xff8c83
#elif defined(USE_MFP)
/* MFP USART registers (offsets from iobase) */
LMFP_UCR = 0xfffa29
LMFP_TDCDR = 0xfffa1d
LMFP_TDDR = 0xfffa25
LMFP_TSR = 0xfffa2d
LMFP_UDR = 0xfffa2f
#endif
#endif /* CONFIG_ATARI */
/*
* Serial port output support.
*/
/*
* Initialize serial port hardware
*/
/*
 * serial_init
 *
 * Initialize the per-platform debug serial hardware.  Dispatches on the
 * detected machine type; platforms whose firmware/boot loader already set
 * up the UART (Apollo, HP300) do nothing here.
 */
func_start serial_init,%d0/%d1/%a0/%a1
/*
* Some of the register usage that follows
* CONFIG_AMIGA
* a0 = pointer to boot info record
* d0 = boot info offset
* CONFIG_ATARI
* a0 = address of SCC
* a1 = Liobase address/address of scc_initable_atari
* d0 = init data for serial port
* CONFIG_MAC
* a0 = address of SCC
* a1 = address of scc_initable_mac
* d0 = init data for serial port
*/
#ifdef CONFIG_AMIGA
#define SERIAL_DTR 7
#define SERIAL_CNTRL CIABBASE+C_PRA
is_not_amiga(1f)
lea %pc@(L(custom)),%a0
movel #-ZTWOBASE,%a0@
bclr #SERIAL_DTR,SERIAL_CNTRL-ZTWOBASE /* assert DTR */
get_bi_record BI_AMIGA_SERPER
movew %a0@,CUSTOMBASE+C_SERPER-ZTWOBASE /* baud rate from bootinfo */
| movew #61,CUSTOMBASE+C_SERPER-ZTWOBASE
1:
#endif
#ifdef CONFIG_ATARI
is_not_atari(4f)
movel %pc@(L(iobase)),%a1
#if defined(USE_PRINTER)
/* configure PSG port B + MFP strobe line for Centronics output */
bclr #0,%a1@(LSTMFP_IERB)
bclr #0,%a1@(LSTMFP_DDR)
moveb #LPSG_CONTROL,%a1@(LPSG_SELECT)
moveb #0xff,%a1@(LPSG_WRITE)
moveb #LPSG_IO_B,%a1@(LPSG_SELECT)
clrb %a1@(LPSG_WRITE)
moveb #LPSG_IO_A,%a1@(LPSG_SELECT)
moveb %a1@(LPSG_READ),%d0
bset #5,%d0
moveb %d0,%a1@(LPSG_WRITE)
#elif defined(USE_SCC_A) || defined(USE_SCC_B)
lea %a1@(LSCC_CTRL),%a0
/* Reset SCC register pointer */
moveb %a0@,%d0
/* Reset SCC device: write register pointer then register value */
moveb #9,%a0@
moveb #0xc0,%a0@
/* Wait for 5 PCLK cycles, which is about 63 CPU cycles */
/* 5 / 7.9872 MHz = approx. 0.63 us = 63 / 100 MHz */
movel #32,%d0
2:
subq #1,%d0
jne 2b
/* Initialize channel */
lea %pc@(L(scc_initable_atari)),%a1
2: moveb %a1@+,%d0 /* table: (register, value) pairs, -1 ends */
jmi 3f
moveb %d0,%a0@
moveb %a1@+,%a0@
jra 2b
3: clrb %a0@
#elif defined(USE_MFP)
/* program the MFP USART: 8N1, timer D as baud generator */
bclr #1,%a1@(LMFP_TSR)
moveb #0x88,%a1@(LMFP_UCR)
andb #0x70,%a1@(LMFP_TDCDR)
moveb #2,%a1@(LMFP_TDDR)
orb #1,%a1@(LMFP_TDCDR)
bset #1,%a1@(LMFP_TSR)
#endif
jra L(serial_init_done)
4:
#endif
#ifdef CONFIG_MAC
is_not_mac(L(serial_init_not_mac))
#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
#define mac_scc_cha_b_ctrl_offset 0x0
#define mac_scc_cha_a_ctrl_offset 0x2
#define mac_scc_cha_b_data_offset 0x4
#define mac_scc_cha_a_data_offset 0x6
movel %pc@(L(mac_sccbase)),%a0
/* Reset SCC register pointer */
moveb %a0@(mac_scc_cha_a_ctrl_offset),%d0
/* Reset SCC device: write register pointer then register value */
moveb #9,%a0@(mac_scc_cha_a_ctrl_offset)
moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
/* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
/* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */
movel #35,%d0
5:
subq #1,%d0
jne 5b
#endif
#ifdef MAC_USE_SCC_A
/* Initialize channel A */
lea %pc@(L(scc_initable_mac)),%a1
5: moveb %a1@+,%d0 /* table: (register, value) pairs, -1 ends */
jmi 6f
moveb %d0,%a0@(mac_scc_cha_a_ctrl_offset)
moveb %a1@+,%a0@(mac_scc_cha_a_ctrl_offset)
jra 5b
6:
#endif /* MAC_USE_SCC_A */
#ifdef MAC_USE_SCC_B
/* Initialize channel B */
lea %pc@(L(scc_initable_mac)),%a1
7: moveb %a1@+,%d0 /* table: (register, value) pairs, -1 ends */
jmi 8f
moveb %d0,%a0@(mac_scc_cha_b_ctrl_offset)
moveb %a1@+,%a0@(mac_scc_cha_b_ctrl_offset)
jra 7b
8:
#endif /* MAC_USE_SCC_B */
jra L(serial_init_done)
L(serial_init_not_mac):
#endif /* CONFIG_MAC */
#ifdef CONFIG_Q40
is_not_q40(2f)
/* debug output goes into SRAM, so we don't do it unless requested
- check for '%LX$' signature in SRAM */
lea %pc@(q40_mem_cptr),%a1
move.l #0xff020010,%a1@ /* must be inited - also used by debug=mem */
move.l #0xff020000,%a1
cmp.b #'%',%a1@
bne 2f /*nodbg*/
addq.w #4,%a1
cmp.b #'L',%a1@
bne 2f /*nodbg*/
addq.w #4,%a1
cmp.b #'X',%a1@
bne 2f /*nodbg*/
addq.w #4,%a1
cmp.b #'$',%a1@
bne 2f /*nodbg*/
/* signature OK */
lea %pc@(L(q40_do_debug)),%a1
tas %a1@
/*nodbg: q40_do_debug is 0 by default*/
2:
#endif
#ifdef CONFIG_MVME16x
is_not_mvme16x(L(serial_init_not_mvme16x))
moveb #0x10,M167_PCSCCMICR
moveb #0x10,M167_PCSCCTICR
moveb #0x10,M167_PCSCCRICR
jra L(serial_init_done)
L(serial_init_not_mvme16x):
#endif
#ifdef CONFIG_APOLLO
/* We count on the PROM initializing SIO1 */
#endif
#ifdef CONFIG_HP300
/* We count on the boot loader initialising the UART */
#endif
L(serial_init_done):
func_return serial_init
/*
* Output character on serial port.
*/
/*
 * serial_putc
 *
 * In:  ARG1 = character to output.
 * Writes the character to the platform's debug serial port, busy-waiting
 * for the transmitter to become ready.  '\n' is expanded to "\r\n" by a
 * single recursive call.
 */
func_start serial_putc,%d0/%d1/%a0/%a1
movel ARG1,%d0
cmpib #'\n',%d0
jbne 1f
/* A little safe recursion is good for the soul */
serial_putc #'\r'
1:
#ifdef CONFIG_AMIGA
is_not_amiga(2f)
andw #0x00ff,%d0
oriw #0x0100,%d0 /* add the stop bit for SERDAT */
movel %pc@(L(custom)),%a0
movew %d0,%a0@(CUSTOMBASE+C_SERDAT)
1: movew %a0@(CUSTOMBASE+C_SERDATR),%d0
andw #0x2000,%d0 /* wait for TBE (transmit buffer empty) */
jeq 1b
jra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_MAC
is_not_mac(5f)
#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
movel %pc@(L(mac_sccbase)),%a1
#endif
#ifdef MAC_USE_SCC_A
3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset) /* wait for Tx buffer empty */
jeq 3b
moveb %d0,%a1@(mac_scc_cha_a_data_offset)
#endif /* MAC_USE_SCC_A */
#ifdef MAC_USE_SCC_B
4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset) /* wait for Tx buffer empty */
jeq 4b
moveb %d0,%a1@(mac_scc_cha_b_data_offset)
#endif /* MAC_USE_SCC_B */
jra L(serial_putc_done)
5:
#endif /* CONFIG_MAC */
#ifdef CONFIG_ATARI
is_not_atari(4f)
movel %pc@(L(iobase)),%a1
#if defined(USE_PRINTER)
3: btst #0,%a1@(LSTMFP_GPIP) /* wait for printer not busy */
jne 3b
moveb #LPSG_IO_B,%a1@(LPSG_SELECT)
moveb %d0,%a1@(LPSG_WRITE) /* data on port B */
moveb #LPSG_IO_A,%a1@(LPSG_SELECT)
moveb %a1@(LPSG_READ),%d0
bclr #5,%d0 /* pulse the strobe line */
moveb %d0,%a1@(LPSG_WRITE)
nop
nop
bset #5,%d0
moveb %d0,%a1@(LPSG_WRITE)
#elif defined(USE_SCC_A) || defined(USE_SCC_B)
3: btst #2,%a1@(LSCC_CTRL) /* wait for Tx buffer empty */
jeq 3b
moveb %d0,%a1@(LSCC_DATA)
#elif defined(USE_MFP)
3: btst #7,%a1@(LMFP_TSR) /* wait for Tx buffer empty */
jeq 3b
moveb %d0,%a1@(LMFP_UDR)
#endif
jra L(serial_putc_done)
4:
#endif /* CONFIG_ATARI */
#ifdef CONFIG_MVME147
is_not_mvme147(2f)
1: btst #2,M147_SCC_CTRL_A /* wait for Tx buffer empty */
jeq 1b
moveb %d0,M147_SCC_DATA_A
jbra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_MVME16x
is_not_mvme16x(2f)
/*
* If the loader gave us a board type then we can use that to
* select an appropriate output routine; otherwise we just use
* the Bug code. If we have to use the Bug that means the Bug
* workspace has to be valid, which means the Bug has to use
* the SRAM, which is non-standard.
*/
moveml %d0-%d7/%a2-%a6,%sp@-
movel vme_brdtype,%d1
jeq 1f | No tag - use the Bug
cmpi #VME_TYPE_MVME162,%d1
jeq 6f
cmpi #VME_TYPE_MVME172,%d1
jne 5f
/* 162/172; it's an SCC */
6: btst #2,M162_SCC_CTRL_A /* wait for Tx buffer empty */
nop
nop
nop
jeq 6b
moveb #8,M162_SCC_CTRL_A /* select data register */
nop
nop
nop
moveb %d0,M162_SCC_CTRL_A
jra 3f
5:
/* 166/167/177; it's a CD2401 */
moveb #0,M167_CYCAR /* select channel 0 */
moveb M167_CYIER,%d2 /* save interrupt enables */
moveb #0x02,M167_CYIER /* enable Tx interrupts only */
7:
btst #5,M167_PCSCCTICR /* wait for a Tx interrupt pending */
jeq 7b
moveb M167_PCTPIACKR,%d1 /* interrupt acknowledge cycle */
moveb M167_CYLICR,%d1
jeq 8f
moveb #0x08,M167_CYTEOIR /* not our interrupt: end it, retry */
jra 7b
8:
moveb %d0,M167_CYTDR /* transmit the character */
moveb #0,M167_CYTEOIR /* end-of-interrupt */
moveb %d2,M167_CYIER /* restore interrupt enables */
jra 3f
1:
moveb %d0,%sp@- /* fall back to the 16x-Bug OUTCHR trap */
trap #15
.word 0x0020 /* TRAP 0x020 */
3:
moveml %sp@+,%d0-%d7/%a2-%a6
jbra L(serial_putc_done)
2:
#endif /* CONFIG_MVME16x */
#ifdef CONFIG_BVME6000
is_not_bvme6000(2f)
/*
* The BVME6000 machine has a serial port ...
*/
1: btst #2,BVME_SCC_CTRL_A /* wait for Tx buffer empty */
jeq 1b
moveb %d0,BVME_SCC_DATA_A
jbra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_SUN3X
is_not_sun3x(2f)
movel %d0,-(%sp) /* PROM putchar takes the char on the stack */
movel 0xFEFE0018,%a1
jbsr (%a1)
addq #4,%sp
jbra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_Q40
is_not_q40(2f)
tst.l %pc@(L(q40_do_debug)) /* only debug if requested */
beq 2f
lea %pc@(q40_mem_cptr),%a1
move.l %a1@,%a0
move.b %d0,%a0@ /* store char in SRAM debug buffer */
addq.l #4,%a0
move.l %a0,%a1@ /* advance the buffer cursor */
jbra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_APOLLO
is_not_apollo(2f)
movl %pc@(L(iobase)),%a1
moveb %d0,%a1@(LTHRB0) /* write, then wait for Tx ready */
1: moveb %a1@(LSRB0),%d0
andb #0x4,%d0
beq 1b
jbra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_HP300
is_not_hp300(3f)
movl %pc@(L(iobase)),%a1
addl %pc@(L(uartbase)),%a1
movel %pc@(L(uart_scode)),%d1 /* Check the scode */
jmi 3f /* Unset? Exit */
cmpi #256,%d1 /* APCI scode? */
jeq 2f
1: moveb %a1@(DCALSR),%d1 /* Output to DCA */
andb #0x20,%d1 /* wait for THR empty */
beq 1b
moveb %d0,%a1@(DCADATA)
jbra L(serial_putc_done)
2: moveb %a1@(APCILSR),%d1 /* Output to APCI */
andb #0x20,%d1 /* wait for THR empty */
beq 2b
moveb %d0,%a1@(APCIDATA)
jbra L(serial_putc_done)
3:
#endif
#ifdef CONFIG_VIRT
is_not_virt(1f)
movel L(virt_gf_tty_base),%a1
movel %d0,%a1@(GF_PUT_CHAR)
1:
#endif
L(serial_putc_done):
func_return serial_putc
/*
* Output a string.
*/
/*
 * puts
 *
 * In:  ARG1 = address of a NUL-terminated string.
 * Writes the string to the console and/or serial debug channels.
 */
func_start puts,%d0/%a0
movel ARG1,%a0
jra 2f /* enter the loop at the load/test */
1:
#ifdef CONSOLE_DEBUG
console_putc %d0
#endif
#ifdef SERIAL_DEBUG
serial_putc %d0
#endif
2: moveb %a0@+,%d0
jne 1b /* stop at the NUL terminator */
func_return puts
/*
* Output number in hex notation.
*/
/*
 * putn
 *
 * In:  ARG1 = 32-bit value.
 * Prints a leading space followed by the value as 8 uppercase hex digits,
 * most significant nibble first.
 */
func_start putn,%d0-%d2
putc ' '
movel ARG1,%d0
moveq #7,%d1 /* 8 nibbles */
1: roll #4,%d0 /* rotate next most-significant nibble into place */
move %d0,%d2
andb #0x0f,%d2
addb #'0',%d2
cmpb #'9',%d2
jls 2f
addb #'A'-('9'+1),%d2 /* 10..15 -> 'A'..'F' */
2:
#ifdef CONSOLE_DEBUG
console_putc %d2
#endif
#ifdef SERIAL_DEBUG
serial_putc %d2
#endif
dbra %d1,1b
func_return putn
#ifdef CONFIG_EARLY_PRINTK
/*
* This routine takes its parameters on the stack. It then
* turns around and calls the internal routines. This routine
* is used by the boot console.
*
* The calling parameters are:
* void debug_cons_nputs(const char *str, unsigned length)
*
* This routine does NOT understand variable arguments only
* simple strings!
*/
/*
 * C-callable: void debug_cons_nputs(const char *str, unsigned length)
 * (args on the stack).  Writes at most `length` bytes, stopping early at a
 * NUL.  Runs with interrupts disabled; preserves %d0/%d1/%a0 and %sr.
 */
ENTRY(debug_cons_nputs)
moveml %d0/%d1/%a0,%sp@-
movew %sr,%sp@-
ori #0x0700,%sr /* mask all interrupts while printing */
movel %sp@(18),%a0 /* fetch parameter */
movel %sp@(22),%d1 /* fetch parameter */
jra 2f
1:
#ifdef CONSOLE_DEBUG
console_putc %d0
#endif
#ifdef SERIAL_DEBUG
serial_putc %d0
#endif
subq #1,%d1
2: jeq 3f /* length exhausted */
moveb %a0@+,%d0
jne 1b /* stop at NUL */
3:
movew %sp@+,%sr
moveml %sp@+,%d0/%d1/%a0
rts
#endif /* CONFIG_EARLY_PRINTK */
#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
/*
 * set_leds
 *
 * In:  ARG1 = LED bit pattern.
 * Drives the diagnostic LEDs on HP300 and Apollo hardware.
 */
func_start set_leds,%d0/%a0
movel ARG1,%d0
#ifdef CONFIG_HP300
is_not_hp300(1f)
movel %pc@(L(iobase)),%a0
moveb %d0,%a0@(0x1ffff)
jra 2f
#endif
1:
#ifdef CONFIG_APOLLO
movel %pc@(L(iobase)),%a0
lsll #8,%d0
eorw #0xff00,%d0 /* Apollo LEDs are active-low */
moveb %d0,%a0@(LCPUCTRL)
#endif
2:
func_return set_leds
#endif
#ifdef CONSOLE_DEBUG
/*
* For continuity, see the data alignment
* to which this structure is tied.
*/
#define Lconsole_struct_cur_column 0
#define Lconsole_struct_cur_row 4
#define Lconsole_struct_num_columns 8
#define Lconsole_struct_num_rows 12
#define Lconsole_struct_left_edge 16
/*
 * console_init
 *
 * Set up the early Mac framebuffer console: clears the screen to black,
 * selects a compiled-in font, and computes the character grid dimensions
 * in L(console_globals).
 */
func_start console_init,%a0-%a4/%d0-%d7
/*
* Some of the register usage that follows
* a0 = pointer to boot_info
* a1 = pointer to screen
* a2 = pointer to console_globals
* d3 = pixel width of screen
* d4 = pixel height of screen
* (d3,d4) ~= (x,y) of a point just below
* and to the right of the screen
* NOT on the screen!
* d5 = number of bytes per scan line
* d6 = number of bytes on the entire screen
*/
lea %pc@(L(console_globals)),%a2
movel %pc@(L(mac_videobase)),%a1
movel %pc@(L(mac_rowbytes)),%d5
movel %pc@(L(mac_dimensions)),%d3 /* -> low byte */
movel %d3,%d4
swap %d4 /* -> high byte */
andl #0xffff,%d3 /* d3 = screen width in pixels */
andl #0xffff,%d4 /* d4 = screen height in pixels */
movel %d5,%d6
| subl #20,%d6
mulul %d4,%d6 /* scan line bytes x num scan lines */
divul #8,%d6 /* we'll clear 8 bytes at a time */
moveq #-1,%d0 /* Mac_black */
subq #1,%d6 /* adjust loop count for dbra */
L(console_clear_loop):
movel %d0,%a1@+
movel %d0,%a1@+
dbra %d6,L(console_clear_loop)
/* Calculate font size */
#if defined(FONT_8x8) && defined(CONFIG_FONT_8x8)
lea %pc@(font_vga_8x8),%a0
#elif defined(FONT_8x16) && defined(CONFIG_FONT_8x16)
lea %pc@(font_vga_8x16),%a0
#elif defined(FONT_6x11) && defined(CONFIG_FONT_6x11)
lea %pc@(font_vga_6x11),%a0
#elif defined(CONFIG_FONT_8x8) /* default */
lea %pc@(font_vga_8x8),%a0
#else /* no compiled-in font */
lea 0,%a0
#endif
/*
* At this point we make a shift in register usage
* a1 = address of console_font pointer
*/
lea %pc@(L(console_font)),%a1
movel %a0,%a1@ /* store pointer to struct fbcon_font_desc in console_font */
tstl %a0
jeq 1f /* no font: leave console disabled */
lea %pc@(L(console_font_data)),%a4
movel %a0@(FONT_DESC_DATA),%d0
subl #L(console_font),%a1
addl %a1,%d0 /* relocate font data pointer */
movel %d0,%a4@
/*
* Calculate global maxs
* Note - we can use either an
* 8 x 16 or 8 x 8 character font
* 6 x 11 also supported
*/
/* ASSERT: a0 = contents of Lconsole_font */
movel %d3,%d0 /* screen width in pixels */
divul %a0@(FONT_DESC_WIDTH),%d0 /* d0 = max num chars per row */
movel %d4,%d1 /* screen height in pixels */
divul %a0@(FONT_DESC_HEIGHT),%d1 /* d1 = max num rows */
movel %d0,%a2@(Lconsole_struct_num_columns)
movel %d1,%a2@(Lconsole_struct_num_rows)
/*
* Clear the current row and column
*/
clrl %a2@(Lconsole_struct_cur_column)
clrl %a2@(Lconsole_struct_cur_row)
clrl %a2@(Lconsole_struct_left_edge)
/*
* Initialization is complete
*/
1:
func_return console_init
#ifdef CONFIG_LOGO
func_start console_put_penguin,%a0-%a1/%d0-%d7
/*
* Get 'that_penguin' onto the screen in the upper right corner
* penguin is 64 x 74 pixels, align against right edge of screen
* Bitmap is 4 bits per pixel: each byte holds two pixels (high
* nibble first).
*/
lea %pc@(L(mac_dimensions)),%a0
movel %a0@,%d0
andil #0xffff,%d0 /* d0 = screen width */
subil #64,%d0 /* snug up against the right edge */
clrl %d1 /* start at the top */
movel #73,%d7 /* 74 rows (dbra counts to -1) */
lea %pc@(L(that_penguin)),%a1
L(console_penguin_row):
movel #31,%d6 /* 32 byte pairs = 64 pixels per row */
L(console_penguin_pixel_pair):
moveb %a1@,%d2
lsrb #4,%d2 /* high nibble = left pixel */
console_plot_pixel %d0,%d1,%d2
addq #1,%d0
moveb %a1@+,%d2 /* low nibble = right pixel */
console_plot_pixel %d0,%d1,%d2
addq #1,%d0
dbra %d6,L(console_penguin_pixel_pair)
subil #64,%d0 /* back to the row's left edge */
addq #1,%d1 /* next scan line */
dbra %d7,L(console_penguin_row)
func_return console_put_penguin
/* include penguin bitmap */
L(that_penguin):
#include "../mac/mac_penguin.S"
#endif
/*
* Calculate source and destination addresses
* output a1 = dest
* a2 = source
*/
/*
 * console_scroll
 *
 * Scroll the framebuffer console up by one character row: copies the whole
 * screen up by one font height, then fills the freed bottom row with the
 * background (all-ones) pattern.
 * Working registers: a1 = dest, a2 = source.
 */
func_start console_scroll,%a0-%a4/%d0-%d7
lea %pc@(L(mac_videobase)),%a0
movel %a0@,%a1
movel %a1,%a2
lea %pc@(L(mac_rowbytes)),%a0
movel %a0@,%d5
movel %pc@(L(console_font)),%a0
tstl %a0
jeq 1f /* no font: console disabled */
mulul %a0@(FONT_DESC_HEIGHT),%d5 /* account for # scan lines per character */
addal %d5,%a2 /* source = one char row below dest */
/*
* Get dimensions
*/
lea %pc@(L(mac_dimensions)),%a0
movel %a0@,%d3
movel %d3,%d4
swap %d4
andl #0xffff,%d3 /* d3 = screen width in pixels */
andl #0xffff,%d4 /* d4 = screen height in pixels */
/*
* Calculate number of bytes to move
*/
lea %pc@(L(mac_rowbytes)),%a0
movel %a0@,%d6
movel %pc@(L(console_font)),%a0
subl %a0@(FONT_DESC_HEIGHT),%d4 /* we're not scrolling the top row! */
mulul %d4,%d6 /* scan line bytes x num scan lines */
divul #32,%d6 /* we'll move 8 longs at a time */
subq #1,%d6 /* adjust loop count for dbra */
L(console_scroll_loop):
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
dbra %d6,L(console_scroll_loop)
lea %pc@(L(mac_rowbytes)),%a0
movel %a0@,%d6
movel %pc@(L(console_font)),%a0
mulul %a0@(FONT_DESC_HEIGHT),%d6 /* scan line bytes x font height */
divul #32,%d6 /* we'll move 8 words at a time */
subq #1,%d6
moveq #-1,%d0 /* background fill pattern */
L(console_scroll_clear_loop):
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
dbra %d6,L(console_scroll_clear_loop)
1:
func_return console_scroll
/*
 * console_putc
 *
 * In:  ARG1 = character.
 * Renders a character on the framebuffer console.  Handles '\n' (expanded
 * to CR+LF via recursion, scrolling at the bottom row), '\r' (column 0),
 * and ^A (cursor home); everything else is drawn with the current font,
 * wrapping to a new line at the right edge.
 */
func_start console_putc,%a0/%a1/%d0-%d7
is_not_mac(L(console_exit))
tstl %pc@(L(console_font))
jeq L(console_exit) /* no font: console disabled */
/* Output character in d7 on console.
*/
movel ARG1,%d7
cmpib #'\n',%d7
jbne 1f
/* A little safe recursion is good for the soul */
console_putc #'\r'
1:
lea %pc@(L(console_globals)),%a0
cmpib #10,%d7
jne L(console_not_lf)
movel %a0@(Lconsole_struct_cur_row),%d0
addil #1,%d0
movel %d0,%a0@(Lconsole_struct_cur_row)
movel %a0@(Lconsole_struct_num_rows),%d1
cmpl %d1,%d0
jcs 1f
subil #1,%d0 /* past last row: stay and scroll instead */
movel %d0,%a0@(Lconsole_struct_cur_row)
console_scroll
1:
jra L(console_exit)
L(console_not_lf):
cmpib #13,%d7
jne L(console_not_cr)
clrl %a0@(Lconsole_struct_cur_column)
jra L(console_exit)
L(console_not_cr):
cmpib #1,%d7
jne L(console_not_home)
clrl %a0@(Lconsole_struct_cur_row)
clrl %a0@(Lconsole_struct_cur_column)
jra L(console_exit)
/*
* At this point we know that the %d7 character is going to be
* rendered on the screen. Register usage is -
* a0 = pointer to console globals
* a1 = font data
* d0 = cursor column
* d1 = cursor row to draw the character
* d7 = character number
*/
L(console_not_home):
movel %a0@(Lconsole_struct_cur_column),%d0
addql #1,%a0@(Lconsole_struct_cur_column)
movel %a0@(Lconsole_struct_num_columns),%d1
cmpl %d1,%d0
jcs 1f
console_putc #'\n' /* recursion is OK! */
1:
movel %a0@(Lconsole_struct_cur_row),%d1
/*
* At this point we make a shift in register usage
* a0 = address of pointer to font data (fbcon_font_desc)
*/
movel %pc@(L(console_font)),%a0
movel %pc@(L(console_font_data)),%a1 /* Load fbcon_font_desc.data into a1 */
andl #0x000000ff,%d7
/* ASSERT: a0 = contents of Lconsole_font */
mulul %a0@(FONT_DESC_HEIGHT),%d7 /* d7 = index into font data */
addl %d7,%a1 /* a1 = points to char image */
/*
* At this point we make a shift in register usage
* d0 = pixel coordinate, x
* d1 = pixel coordinate, y
* d2 = (bit 0) 1/0 for white/black (!) pixel on screen
* d3 = font scan line data (8 pixels)
* d6 = count down for the font's pixel width (8)
* d7 = count down for the font's pixel count in height
*/
/* ASSERT: a0 = contents of Lconsole_font */
mulul %a0@(FONT_DESC_WIDTH),%d0 /* char cell -> pixel coordinates */
mulul %a0@(FONT_DESC_HEIGHT),%d1
movel %a0@(FONT_DESC_HEIGHT),%d7 /* Load fbcon_font_desc.height into d7 */
subq #1,%d7
L(console_read_char_scanline):
moveb %a1@+,%d3
/* ASSERT: a0 = contents of Lconsole_font */
movel %a0@(FONT_DESC_WIDTH),%d6 /* Load fbcon_font_desc.width into d6 */
subql #1,%d6
L(console_do_font_scanline):
lslb #1,%d3 /* shift out the leftmost font bit */
scsb %d2 /* convert 1 bit into a byte */
console_plot_pixel %d0,%d1,%d2
addq #1,%d0
dbra %d6,L(console_do_font_scanline)
/* ASSERT: a0 = contents of Lconsole_font */
subl %a0@(FONT_DESC_WIDTH),%d0 /* back to the char cell's left edge */
addq #1,%d1 /* next scan line */
dbra %d7,L(console_read_char_scanline)
L(console_exit):
func_return console_putc
/*
* Input:
* d0 = x coordinate
* d1 = y coordinate
* d2 = (bit 0) 1/0 for white/black (!)
* All registers are preserved
*/
/*
 * console_plot_pixel
 *
 * Input:
 * d0 = x coordinate
 * d1 = y coordinate
 * d2 = (bit 0) 1/0 for white/black (!)
 * All registers are preserved
 * Supports framebuffer depths of 1, 2, 4, 8 and 16 bits per pixel; for
 * sub-byte depths the x coordinate is converted to a byte offset plus a
 * bit number within the byte.
 */
func_start console_plot_pixel,%a0-%a1/%d0-%d4
movel %pc@(L(mac_videobase)),%a1
movel %pc@(L(mac_videodepth)),%d3
movel ARG1,%d0
movel ARG2,%d1
mulul %pc@(L(mac_rowbytes)),%d1 /* d1 = byte offset of the scan line */
movel ARG3,%d2
/*
* Register usage:
* d0 = x coord becomes byte offset into frame buffer
* d1 = y coord
* d2 = black or white (0/1)
* d3 = video depth
* d4 = temp of x (d0) for many bit depths
*/
L(test_1bit):
cmpb #1,%d3
jbne L(test_2bit)
movel %d0,%d4 /* we need the low order 3 bits! */
divul #8,%d0 /* 8 pixels per byte */
addal %d0,%a1
addal %d1,%a1
andb #7,%d4
eorb #7,%d4 /* reverse the x-coordinate w/ screen-bit # */
andb #1,%d2
jbne L(white_1)
bsetb %d4,%a1@ /* black = set the bit */
jbra L(console_plot_pixel_exit)
L(white_1):
bclrb %d4,%a1@ /* white = clear the bit */
jbra L(console_plot_pixel_exit)
L(test_2bit):
cmpb #2,%d3
jbne L(test_4bit)
movel %d0,%d4 /* we need the low order 2 bits! */
divul #4,%d0 /* 4 pixels per byte */
addal %d0,%a1
addal %d1,%a1
andb #3,%d4
eorb #3,%d4 /* reverse the x-coordinate w/ screen-bit # */
lsll #1,%d4 /* ! */
andb #1,%d2
jbne L(white_2)
bsetb %d4,%a1@ /* black = set both bits of the pixel */
addq #1,%d4
bsetb %d4,%a1@
jbra L(console_plot_pixel_exit)
L(white_2):
bclrb %d4,%a1@ /* white = clear both bits of the pixel */
addq #1,%d4
bclrb %d4,%a1@
jbra L(console_plot_pixel_exit)
L(test_4bit):
cmpb #4,%d3
jbne L(test_8bit)
movel %d0,%d4 /* we need the low order bit! */
divul #2,%d0 /* 2 pixels per byte */
addal %d0,%a1
addal %d1,%a1
andb #1,%d4
eorb #1,%d4
lsll #2,%d4 /* ! */
andb #1,%d2
jbne L(white_4)
bsetb %d4,%a1@ /* black = set all four bits of the pixel */
addq #1,%d4
bsetb %d4,%a1@
addq #1,%d4
bsetb %d4,%a1@
addq #1,%d4
bsetb %d4,%a1@
jbra L(console_plot_pixel_exit)
L(white_4):
bclrb %d4,%a1@ /* white = clear all four bits of the pixel */
addq #1,%d4
bclrb %d4,%a1@
addq #1,%d4
bclrb %d4,%a1@
addq #1,%d4
bclrb %d4,%a1@
jbra L(console_plot_pixel_exit)
L(test_8bit):
cmpb #8,%d3
jbne L(test_16bit)
addal %d0,%a1 /* one byte per pixel */
addal %d1,%a1
andb #1,%d2
jbne L(white_8)
moveb #0xff,%a1@
jbra L(console_plot_pixel_exit)
L(white_8):
clrb %a1@
jbra L(console_plot_pixel_exit)
L(test_16bit):
cmpb #16,%d3
jbne L(console_plot_pixel_exit)
addal %d0,%a1 /* two bytes per pixel */
addal %d0,%a1
addal %d1,%a1
andb #1,%d2
jbne L(white_16)
clrw %a1@
jbra L(console_plot_pixel_exit)
L(white_16):
movew #0x0fff,%a1@
jbra L(console_plot_pixel_exit)
L(console_plot_pixel_exit):
func_return console_plot_pixel
#endif /* CONSOLE_DEBUG */
__INITDATA
.align 4
| Number of bytes already mapped by the early startup code.
m68k_init_mapped_size:
.long 0
#if defined(CONFIG_ATARI) || defined(CONFIG_AMIGA) || \
defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
| Early I/O base address used by the startup code, filled in at boot.
L(custom):
L(iobase):
.long 0
#endif
#ifdef CONSOLE_DEBUG
| State for the early debug console.
L(console_globals):
.long 0 /* cursor column */
.long 0 /* cursor row */
.long 0 /* max num columns */
.long 0 /* max num rows */
.long 0 /* left edge */
L(console_font):
.long 0 /* pointer to console font (struct font_desc) */
L(console_font_data):
.long 0 /* pointer to console font data */
#endif /* CONSOLE_DEBUG */
#if defined(MMU_PRINT)
| State for the MMU-table dump debugging facility.
L(mmu_print_data):
.long 0 /* valid flag */
.long 0 /* start logical */
.long 0 /* next logical */
.long 0 /* start physical */
.long 0 /* next physical */
#endif /* MMU_PRINT */
| Scratch variables used while the boot-time MMU tables are built.
L(cputype):
.long 0
L(mmu_cached_pointer_tables):
.long 0
L(mmu_num_pointer_tables):
.long 0
L(phys_kernel_start):
.long 0
L(kernel_end):
.long 0
L(memory_start):
.long 0
L(kernel_pgdir_ptr):
.long 0
L(temp_mmap_mem):
.long 0
| Per-board serial-controller register addresses (early console output).
#if defined (CONFIG_MVME147)
M147_SCC_CTRL_A = 0xfffe3002
M147_SCC_DATA_A = 0xfffe3003
#endif
#if defined (CONFIG_MVME16x)
M162_SCC_CTRL_A = 0xfff45005
M167_CYCAR = 0xfff450ee
M167_CYIER = 0xfff45011
M167_CYLICR = 0xfff45026
M167_CYTEOIR = 0xfff45085
M167_CYTDR = 0xfff450f8
M167_PCSCCMICR = 0xfff4201d
M167_PCSCCTICR = 0xfff4201e
M167_PCSCCRICR = 0xfff4201f
M167_PCTPIACKR = 0xfff42025
#endif
#if defined (CONFIG_BVME6000)
BVME_SCC_CTRL_A = 0xffb0000b
BVME_SCC_DATA_A = 0xffb0000f
#endif
#if defined(CONFIG_MAC)
| Macintosh video and serial parameters (filled in from bootinfo).
L(mac_videobase):
.long 0
L(mac_videodepth):
.long 0
L(mac_dimensions):
.long 0
L(mac_rowbytes):
.long 0
L(mac_sccbase):
.long 0
#endif /* CONFIG_MAC */
#if defined (CONFIG_APOLLO)
LSRB0 = 0x10412
LTHRB0 = 0x10416
LCPUCTRL = 0x10100
#endif
#if defined(CONFIG_HP300)
DCADATA = 0x11
DCALSR = 0x1b
APCIDATA = 0x00
APCILSR = 0x14
L(uartbase):
.long 0
L(uart_scode):
.long -1 /* -1 = not determined yet -- NOTE(review): confirm */
#endif
__FINIT
.data
.align 4
| Runtime-initialized variables shared with C code.
availmem:
.long 0
m68k_pgtable_cachemode:
.long 0
m68k_supervisor_cachemode:
.long 0
#if defined(CONFIG_MVME16x)
| Buffer for the board-ID data (8 longwords).
mvme_bdid:
.long 0,0,0,0,0,0,0,0
#endif
#if defined(CONFIG_Q40)
q40_mem_cptr:
.long 0
L(q40_do_debug):
.long 0
#endif
#if defined(CONFIG_VIRT)
GF_PUT_CHAR = 0x00 /* tty register offset used for output -- NOTE(review): confirm */
L(virt_gf_tty_base):
.long 0
#endif /* CONFIG_VIRT */
|
aixcc-public/challenge-001-exemplar-source
| 1,988
|
arch/m68k/kernel/sun3-head.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/entry.h>
#include <asm/page.h>
#include <asm/contregs.h>
#include <asm/sun3-head.h>
| SR value: supervisor mode, interrupt priority level 7 (all ints masked).
PSL_HIGHIPL = 0x2700
| Bytes covered by one segment-map (PMEG) entry: 128 KiB.
NBSG = 0x20000
| Values written to %cacr below.
ICACHE_ONLY = 0x00000009
CACHES_OFF = 0x00000008 | actually a clear and disable --m
#define MAS_STACK INT_STACK
ROOT_TABLE_SIZE = 128
PAGESIZE = 8192
| Segment-map entry value marking an unmapped segment (0xff).
SUN3_INVALID_PMEG = 255
.globl bootup_user_stack
.globl bootup_kernel_stack
.globl pg0
.globl swapper_pg_dir
.globl kernel_pmd_table
.globl availmem
.global m68k_pgtable_cachemode
.global kpt
| todo: all these should be in bss!
| Statically reserved page-table storage (8 KiB each).
swapper_pg_dir: .skip 0x2000
pg0: .skip 0x2000
kernel_pmd_table: .skip 0x2000
.globl kernel_pg_dir
.equ kernel_pg_dir,kernel_pmd_table
__HEAD
ENTRY(_stext)
ENTRY(_start)
/*
 * Sun-3 kernel entry point.  Runs with the MMU in the bootloader's
 * configuration; copies the existing segment mappings up to KERNBASE,
 * jumps to the high alias, then calls into C to finish bringup.
 */
/* Firstly, disable interrupts and set up function codes. */
movew #PSL_HIGHIPL, %sr
moveq #FC_CONTROL, %d0
movec %d0, %sfc | select control space for the movs instructions below
movec %d0, %dfc
/* Make sure we're in context zero. */
moveq #0, %d0
movsb %d0, AC_CONTEXT
/* map everything the bootloader left us into high memory, clean up the
excess later */
lea (AC_SEGMAP+0),%a0 | segment map: low (current) alias
lea (AC_SEGMAP+KERNBASE),%a1 | segment map: high (kernel) alias
1:
movsb %a0@, %d1 | copy one PMEG entry from low to high
movsb %d1, %a1@
cmpib #SUN3_INVALID_PMEG, %d1 | stop at the first unmapped segment
beq 2f
addl #NBSG,%a0 | advance both aliases by one segment
addl #NBSG,%a1
jmp 1b
2:
/* Disable caches and jump to high code. */
moveq #ICACHE_ONLY,%d0 | Cache disabled until we're ready to enable it
movc %d0, %cacr | is this the right value? (yes --m)
jmp 1f:l | forced absolute jump so we land in the high alias
/* Following code executes at high addresses (0xE000xxx). */
1: lea init_task,%curptr | get initial thread...
lea init_thread_union+THREAD_SIZE,%sp | ...and its stack.
/* Point MSP at an invalid page to trap if it's used. --m */
movl #(PAGESIZE),%d0
movc %d0,%msp
moveq #-1,%d0 | -1 = 0xff = SUN3_INVALID_PMEG
movsb %d0,(AC_SEGMAP+0x0) | unmap the low alias of segment 0
jbsr sun3_init
jbsr base_trap_init
jbsr start_kernel
trap #15 | start_kernel() should never return; trap if it does
.data
.even
| Variables shared with C code (declared .globl above).
kpt:
.long 0
availmem:
.long 0
|
aixcc-public/challenge-001-exemplar-source
| 9,946
|
arch/m68k/kernel/entry.S
|
/* -*- mode: asm -*-
*
* linux/arch/m68k/kernel/entry.S
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file README.legal in the main directory of this archive
* for more details.
*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
*
*/
/*
* entry.S contains the system-call and fault low-level handling routines.
* This also contains the timer-interrupt handler, as well as all interrupts
* and faults that can result in a task-switch.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
*/
/*
* 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
* all pointers that used to be 'current' are now entry
* number 0 in the 'current_set' list.
*
* 6/05/00 RZ: added writeback completion after return from sighandler
* for 68040
*/
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup
.text
| Syscall entry stubs for the process-creation calls.  Each saves the
| switch-stack frame (callee-saved registers) so the C implementation can
| see the full register state, calls it, then drops the frame before rts.
ENTRY(__sys_fork)
SAVE_SWITCH_STACK
jbsr sys_fork
lea %sp@(24),%sp | drop the 24-byte switch-stack frame
rts
ENTRY(__sys_clone)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE) | arg: pt_regs pointer (just above the frame)
jbsr m68k_clone
lea %sp@(28),%sp | drop the argument plus the frame
rts
ENTRY(__sys_vfork)
SAVE_SWITCH_STACK
jbsr sys_vfork
lea %sp@(24),%sp
rts
ENTRY(__sys_clone3)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE) | arg: pt_regs pointer
jbsr m68k_clone3
lea %sp@(28),%sp
rts
| Return from a (non-RT) signal handler.  Builds the argument layout
| described below, then falls into the common tail shared with
| sys_rt_sigreturn via label 1:.
ENTRY(sys_sigreturn)
SAVE_SWITCH_STACK
movel %sp,%a1 | switch_stack pointer
lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
lea %sp@(-84),%sp | leave a gap
movel %a1,%sp@-
movel %a0,%sp@-
jbsr do_sigreturn
jra 1f | shared with rt_sigreturn()
ENTRY(sys_rt_sigreturn)
SAVE_SWITCH_STACK
movel %sp,%a1 | switch_stack pointer
lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
lea %sp@(-84),%sp | leave a gap
movel %a1,%sp@-
movel %a0,%sp@-
| stack contents:
| [original pt_regs address] [original switch_stack address]
| [gap] [switch_stack] [pt_regs] [exception frame]
jbsr do_rt_sigreturn
1:
| stack contents now:
| [original pt_regs address] [original switch_stack address]
| [unused part of the gap] [moved switch_stack] [moved pt_regs]
| [replacement exception frame]
| return value of do_{rt_,}sigreturn() points to moved switch_stack.
movel %d0,%sp | discard the leftover junk
RESTORE_SWITCH_STACK
| stack contents now is just [syscall return address] [pt_regs] [frame]
| return pt_regs.d0
movel %sp@(PT_OFF_D0+4),%d0
rts
| Bus-error exception entry: save a full frame and pass its address to
| the C handler buserr_c().
ENTRY(buserr)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
jbsr buserr_c
addql #4,%sp | drop the argument
jra ret_from_exception
| Generic trap/fault exception entry: same shape, dispatched to trap_c().
ENTRY(trap)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
jbsr trap_c
addql #4,%sp | drop the argument
jra ret_from_exception
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
movel %d1,%sp@- | arg: previous task
jsr schedule_tail
addql #4,%sp
jra ret_from_exception
ENTRY(ret_from_kernel_thread)
| a3 contains the kernel thread payload, d7 - its argument
movel %d1,%sp@- | finish the context switch first (arg: previous task)
jsr schedule_tail
movel %d7,(%sp) | reuse the stack slot for the payload's argument
jsr %a3@ | run the thread function
addql #4,%sp
jra ret_from_exception
#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
#ifdef TRAP_DBG_INTERRUPT
.globl dbginterrupt
| Debug interrupt entry: forward the saved frame to dbginterrupt_c().
ENTRY(dbginterrupt)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- /* stack frame pointer argument */
jsr dbginterrupt_c
addql #4,%sp
jra ret_from_exception
#endif
| Reschedule helper: record the frame top, then enter schedule() with
| ret_from_exception as its return address.
ENTRY(reschedule)
/* save top of frame */
pea %sp@
jbsr set_esp0
addql #4,%sp
pea ret_from_exception | return address for schedule()
jmp schedule
| User-mode trampolines: issue the matching sigreturn system call after
| a signal handler has finished.
ENTRY(ret_from_user_signal)
moveq #__NR_sigreturn,%d0
trap #0
ENTRY(ret_from_user_rt_signal)
movel #__NR_rt_sigreturn,%d0
trap #0
#else
| Syscall entry when the trace flag is set: let the tracer inspect and
| possibly rewrite the registers, then re-validate the syscall number.
do_trace_entry:
movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
addql #1,%d0 | optimization for cmpil #-1,%d0
jeq ret_from_syscall | tracer cancelled the syscall (-1)
movel %sp@(PT_OFF_ORIG_D0),%d0
cmpl #NR_syscalls,%d0
jcs syscall | still in range: dispatch it
jra ret_from_syscall
| Invalid syscall number: report -ENOSYS.
badsys:
movel #-ENOSYS,%sp@(PT_OFF_D0)
jra ret_from_syscall
| Syscall exit when the trace flag is set: notify the tracer.
do_trace_exit:
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace_leave
RESTORE_SWITCH_STACK
addql #4,%sp
jra .Lret_from_exception
| System-call entry (trap #0): validate the syscall number in d0,
| dispatch through sys_call_table, store the result, and handle any
| pending exit work (tracing, rescheduling, signals).
ENTRY(system_call)
SAVE_ALL_SYS
GET_CURRENT(%d1)
movel %d1,%a1 | a1 = current's thread info -- NOTE(review): per GET_CURRENT
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
| syscall trace?
tstb %a1@(TINFO_FLAGS+2)
jmi do_trace_entry
cmpl #NR_syscalls,%d0 | validate syscall number (unsigned compare)
jcc badsys
syscall:
jbsr @(sys_call_table,%d0:l:4)@(0) | indirect call through the table
movel %d0,%sp@(PT_OFF_D0) | save the return value
ret_from_syscall:
|oriw #0x0700,%sr
movel %curptr@(TASK_STACK),%a1
movew %a1@(TINFO_FLAGS+2),%d0 | any exit work pending?
jne syscall_exit_work
1: RESTORE_ALL
syscall_exit_work:
btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1b | if so, skip resched, signals
lslw #1,%d0 | shift flags; branch on the bits shifted into C/N
jcs do_trace_exit
jmi do_delayed_trace
lslw #8,%d0
jne do_signal_return
pea resume_userspace | return address for schedule()
jra schedule
| Common exception/interrupt exit path: when returning to user mode,
| re-enable interrupts and handle pending work (reschedule, signals,
| delayed trace) before restoring the frame.
ENTRY(ret_from_exception)
.Lret_from_exception:
btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1f | if so, skip resched, signals
| only allow interrupts when we are really the last one on the
| kernel stack, otherwise stack overflow can occur during
| heavy interrupt load
andw #ALLOWINT,%sr
resume_userspace:
movel %curptr@(TASK_STACK),%a1
moveb %a1@(TINFO_FLAGS+3),%d0 | low byte of the work flags
jne exit_work
1: RESTORE_ALL
exit_work:
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
lslb #1,%d0 | shift out the top flag bit; rest means signal work
jne do_signal_return
pea resume_userspace | return address for schedule()
jra schedule
| Deliver pending signals / notifications, then retry userspace return.
do_signal_return:
|andw #ALLOWINT,%sr
subql #4,%sp | dummy return address
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE) | arg: pt_regs pointer
bsrl do_notify_resume
addql #4,%sp
RESTORE_SWITCH_STACK
addql #4,%sp
jbra resume_userspace
| Single-step completed inside the kernel: clear the trace bit and send
| SIGTRAP to the current task.
do_delayed_trace:
bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
pea 1 | send SIGTRAP
movel %curptr,%sp@-
pea LSIGTRAP
jbsr send_sig
addql #8,%sp
addql #4,%sp
jbra resume_userspace
/* This is the main interrupt handler for autovector interrupts */
ENTRY(auto_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
subw #VEC_SPUR,%d0 | make the vector number zero-based
movel %sp,%sp@-
movel %d0,%sp@- | put vector # on stack
auto_irqhandler_fixup = . + 2 | patch point inside the following jsr
jsr do_IRQ | process the IRQ
addql #8,%sp | pop parameters off stack
jra ret_from_exception
/* Handler for user defined interrupt vectors */
ENTRY(user_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2 | patch point inside the following subw
subw #VEC_USER,%d0
movel %sp,%sp@-
movel %d0,%sp@- | put vector # on stack
jsr do_IRQ | process the IRQ
addql #8,%sp | pop parameters off stack
jra ret_from_exception
/* Handler for uninitialized and spurious interrupts */
ENTRY(bad_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | arg: stack frame pointer
jsr handle_badint
addql #4,%sp
jra ret_from_exception
| Task context switch: save the outgoing task's state into its
| thread_struct (SR, FC, USP, kernel SP, FPU context), swap %curptr,
| then restore the same state for the incoming task.
resume:
/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
/* save sr */
movew %sr,%a0@(TASK_THREAD+THREAD_SR)
/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
movec %sfc,%d0
movew %d0,%a0@(TASK_THREAD+THREAD_FC)
/* save usp */
/* it is better to use a movel here instead of a movew 8*) */
movec %usp,%d0
movel %d0,%a0@(TASK_THREAD+THREAD_USP)
/* save non-scratch registers on stack */
SAVE_SWITCH_STACK
/* save current kernel stack pointer */
movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
tstl m68k_fputype | no hardware FPU: skip the save entirely
jeq 3f
#endif
fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
btst #3,m68k_cputype+3 | 060 needs a different null-frame test
beqs 1f
#endif
/* The 060 FPU keeps status in bits 15-8 of the first longword */
tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
jeq 3f | null frame: no register dump needed
#if !defined(CPU_M68060_ONLY)
jra 2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
jeq 3f | null frame: no register dump needed
#endif
2: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif /* CONFIG_M68KFPU_EMU_ONLY */
/* Return previous task in %d1 */
movel %curptr,%d1
/* switch to new task (a1 contains new task) */
movel %a1,%curptr
/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
tstl m68k_fputype | no hardware FPU: skip the restore entirely
jeq 4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
btst #3,m68k_cputype+3
beqs 1f
#endif
/* The 060 FPU keeps status in bits 15-8 of the first longword */
tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
jeq 3f | null frame: only frestore is needed
#if !defined(CPU_M68060_ONLY)
jra 2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
jeq 3f
#endif
2: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif /* CONFIG_M68KFPU_EMU_ONLY */
/* restore the kernel stack pointer */
movel %a1@(TASK_THREAD+THREAD_KSP),%sp
/* restore non-scratch registers */
RESTORE_SWITCH_STACK
/* restore user stack pointer */
movel %a1@(TASK_THREAD+THREAD_USP),%a0
movel %a0,%usp
/* restore fs (sfc,%dfc) */
movew %a1@(TASK_THREAD+THREAD_FC),%a0
movec %a0,%sfc
movec %a0,%dfc
/* restore status register */
movew %a1@(TASK_THREAD+THREAD_SR),%sr
rts
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.