Columns: repo_id (string, lengths 5–115) · size (int64, 590–5.01M) · file_path (string, lengths 4–212) · content (string, lengths 590–5.01M)
aixcc-public/challenge-001-exemplar-source
1,206
arch/arm/include/debug/imx.S
/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/mach-imx/include/mach/debug-macro.S
 *
 * Debugging macro include header
 *
 * Copyright (C) 1994-1999 Russell King
 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
 */

#include <asm/assembler.h>
#include "imx-uart.h"

/*
 * FIXME: This is a copy of IMX_IO_P2V in hardware.h, and needs to
 * stay in sync with it. It is hard to maintain, and should be fixed
 * globally for multi-platform builds to use a fixed virtual address
 * for the low-level debug UART port across platforms.
 */
#define IMX_IO_P2V(x)	(					\
			(((x) & 0x80000000) >> 7) |		\
			(0xf4000000 +				\
			(((x) & 0x50000000) >> 6) +		\
			(((x) & 0x0b000000) >> 4) +		\
			(((x) & 0x000fffff))))

#define UART_VADDR	IMX_IO_P2V(UART_PADDR)

		.macro	addruart, rp, rv, tmp
		ldr	\rp, =UART_PADDR	@ physical
		ldr	\rv, =UART_VADDR	@ virtual
		.endm

		.macro	senduart, rd, rx
		ARM_BE8(rev \rd, \rd)
		str	\rd, [\rx, #0x40]	@ TXDATA
		.endm

		.macro	waituartcts, rd, rx
		.endm

		.macro	waituarttxrdy, rd, rx
		.endm

		.macro	busyuart, rd, rx
1002:		ldr	\rd, [\rx, #0x98]	@ SR2
		ARM_BE8(rev \rd, \rd)
		tst	\rd, #1 << 3		@ TXDC
		beq	1002b			@ wait until transmit done
		.endm
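As an aside on IMX_IO_P2V above: here is a minimal host-side C sketch of the same physical-to-virtual bit shuffle. The function name imx_io_p2v and the sample input 0x73fbc000 (assumed here to be an i.MX UART base) are mine, for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Same bit shuffling as the IMX_IO_P2V assembler macro, in plain C. */
static uint32_t imx_io_p2v(uint32_t x)
{
	return ((x & 0x80000000) >> 7) |
	       (0xf4000000 +
		((x & 0x50000000) >> 6) +
		((x & 0x0b000000) >> 4) +
		(x & 0x000fffff));
}

int main(void)
{
	/* 0x73fbc000 is only a sample physical UART address. */
	printf("phys 0x%08x -> virt 0x%08x\n",
	       0x73fbc000u, (unsigned)imx_io_p2v(0x73fbc000u));
	return 0;
}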
aixcc-public/challenge-001-exemplar-source
2,092
arch/arm/include/debug/omap2plus.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Debugging macro include header
 *
 * Copyright (C) 1994-1999 Russell King
 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
 */

#include <linux/serial_reg.h>

/* External port on Zoom2/3 */
#define ZOOM_UART_BASE		0x10000000
#define ZOOM_UART_VIRT		0xfa400000

#define OMAP_PORT_SHIFT		2
#define ZOOM_PORT_SHIFT		1

#define UART_OFFSET(addr)	((addr) & 0x00ffffff)

		.pushsection .data
		.align	2
omap_uart_phys:	.word	0
omap_uart_virt:	.word	0
omap_uart_lsr:	.word	0
		.popsection

		.macro	addruart, rp, rv, tmp
		/* Use omap_uart_phys/virt if already configured */
10:		adr	\rp, 99f		@ get effective addr of 99f
		ldr	\rv, [\rp]		@ get absolute addr of 99f
		sub	\rv, \rv, \rp		@ offset between the two
		ldr	\rp, [\rp, #4]		@ abs addr of omap_uart_phys
		sub	\tmp, \rp, \rv		@ make it effective
		ldr	\rp, [\tmp, #0]		@ omap_uart_phys
		ldr	\rv, [\tmp, #4]		@ omap_uart_virt
		cmp	\rp, #0			@ is port configured?
		cmpne	\rv, #0
		bne	100f			@ already configured

		/* Configure the UART offset from the phys/virt base */
#ifdef CONFIG_DEBUG_ZOOM_UART
		ldr	\rp, =ZOOM_UART_BASE
		str	\rp, [\tmp, #0]		@ omap_uart_phys
		ldr	\rp, =ZOOM_UART_VIRT
		str	\rp, [\tmp, #4]		@ omap_uart_virt
		mov	\rp, #(UART_LSR << ZOOM_PORT_SHIFT)
		str	\rp, [\tmp, #8]		@ omap_uart_lsr
#endif
		b	10b

		.align
99:		.word	.
		.word	omap_uart_phys
		.ltorg

100:		/* Pass the UART_LSR reg address */
		ldr	\tmp, [\tmp, #8]	@ omap_uart_lsr
		add	\rp, \rp, \tmp
		add	\rv, \rv, \tmp
		.endm

		.macro	senduart, rd, rx
		orr	\rd, \rd, \rx, lsl #24	@ preserve LSR reg offset
		bic	\rx, \rx, #0xff		@ get base (THR) reg address
		strb	\rd, [\rx]		@ send lower byte of rd
		orr	\rx, \rx, \rd, lsr #24	@ restore original rx (LSR)
		bic	\rd, \rd, #(0xff << 24)	@ restore original rd
		.endm

		.macro	busyuart, rd, rx
1001:		ldrb	\rd, [\rx]		@ rx contains UART_LSR address
		and	\rd, \rd, #(UART_LSR_TEMT | UART_LSR_THRE)
		teq	\rd, #(UART_LSR_TEMT | UART_LSR_THRE)
		bne	1001b
		.endm

		.macro	waituartcts, rd, rx
		.endm

		.macro	waituarttxrdy, rd, rx
		.endm
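The senduart macro above parks the LSR register offset in the top byte of rd so no extra register is needed while rx is reused as the THR address. A host-side C sketch of that packing and unpacking; the 0xfa400014 LSR address is a hypothetical value of mine, not taken from the file:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t rd = 'A';			/* byte to send */
	uint32_t rx = 0xfa400014u;		/* hypothetical LSR address */

	rd |= rx << 24;				/* orr rd, rd, rx, lsl #24 */
	rx &= ~0xffu;				/* bic rx, rx, #0xff -> THR base */
	/* strb rd, [rx] would write the low byte of rd to THR here */
	rx |= rd >> 24;				/* orr rx, rx, rd, lsr #24 */
	rd &= ~(0xffu << 24);			/* bic rd, rd, #0xff << 24 */

	assert(rx == 0xfa400014u && rd == 'A');	/* both fully restored */
	return 0;
}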
aixcc-public/challenge-001-exemplar-source
1,041
arch/arm/include/debug/at91.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2003-2005 SAN People
 *
 * Debugging macro include header
 */

#define AT91_DBGU_SR		(0x14)		/* Status Register */
#define AT91_DBGU_THR		(0x1c)		/* Transmitter Holding Register */
#define AT91_DBGU_TXRDY		(1 << 1)	/* Transmitter Ready */
#define AT91_DBGU_TXEMPTY	(1 << 9)	/* Transmitter Empty */

	.macro	addruart, rp, rv, tmp
	ldr	\rp, =CONFIG_DEBUG_UART_PHYS	@ System peripherals (phys address)
	ldr	\rv, =CONFIG_DEBUG_UART_VIRT	@ System peripherals (virt address)
	.endm

	.macro	senduart, rd, rx
	strb	\rd, [\rx, #(AT91_DBGU_THR)]	@ Write to Transmitter Holding Register
	.endm

	.macro	waituarttxrdy, rd, rx
1001:	ldr	\rd, [\rx, #(AT91_DBGU_SR)]	@ Read Status Register
	tst	\rd, #AT91_DBGU_TXRDY		@ DBGU_TXRDY = 1 when ready to transmit
	beq	1001b
	.endm

	.macro	waituartcts, rd, rx
	.endm

	.macro	busyuart, rd, rx
1001:	ldr	\rd, [\rx, #(AT91_DBGU_SR)]	@ Read Status Register
	tst	\rd, #AT91_DBGU_TXEMPTY		@ DBGU_TXEMPTY = 1 when transmission complete
	beq	1001b
	.endm
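For reference, a C rendering of what waituarttxrdy plus senduart do together. This is a sketch only: the function name dbgu_putc is mine, and the base pointer would have to be a mapped DBGU register block on real hardware, so it compiles but cannot run on a host.

#include <stdint.h>

#define AT91_DBGU_SR	0x14		/* Status Register */
#define AT91_DBGU_THR	0x1c		/* Transmitter Holding Register */
#define AT91_DBGU_TXRDY	(1 << 1)	/* Transmitter Ready */

/* Poll SR until the transmitter is ready, then write one byte to THR. */
static void dbgu_putc(volatile uint8_t *base, char c)
{
	while (!(*(volatile uint32_t *)(base + AT91_DBGU_SR) & AT91_DBGU_TXRDY))
		;			/* spin until TXRDY */
	*(base + AT91_DBGU_THR) = (uint8_t)c;
}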
aixcc-public/challenge-001-exemplar-source
1,162
arch/arm/include/debug/stm32.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) STMicroelectronics SA 2017 - All Rights Reserved
 * Author: Gerald Baeza <gerald.baeza@st.com> for STMicroelectronics.
 */

#ifdef CONFIG_STM32F4_DEBUG_UART
#define STM32_USART_SR_OFF	0x00
#define STM32_USART_TDR_OFF	0x04
#endif

#if defined(CONFIG_STM32F7_DEBUG_UART) || defined(CONFIG_STM32H7_DEBUG_UART) || \
    defined(CONFIG_STM32MP1_DEBUG_UART)
#define STM32_USART_SR_OFF	0x1C
#define STM32_USART_TDR_OFF	0x28
#endif

#define STM32_USART_TC		(1 << 6)	/* Tx complete */
#define STM32_USART_TXE		(1 << 7)	/* Tx data reg empty */

		.macro	addruart, rp, rv, tmp
		ldr	\rp, =CONFIG_DEBUG_UART_PHYS	@ physical base
		ldr	\rv, =CONFIG_DEBUG_UART_VIRT	@ virt base
		.endm

		.macro	senduart, rd, rx
		strb	\rd, [\rx, #STM32_USART_TDR_OFF]
		.endm

		.macro	waituartcts, rd, rx
		.endm

		.macro	waituarttxrdy, rd, rx
1001:		ldr	\rd, [\rx, #(STM32_USART_SR_OFF)]	@ Read Status Register
		tst	\rd, #STM32_USART_TXE			@ TXE = 1 = tx empty
		beq	1001b
		.endm

		.macro	busyuart, rd, rx
1001:		ldr	\rd, [\rx, #(STM32_USART_SR_OFF)]	@ Read Status Register
		tst	\rd, #STM32_USART_TC			@ TC = 1 = tx complete
		beq	1001b
		.endm
aixcc-public/challenge-001-exemplar-source
1,032
arch/arm/include/debug/ux500.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Debugging macro include header
 *
 * Copyright (C) 2009 ST-Ericsson
 */

#if CONFIG_UX500_DEBUG_UART > 2
#error Invalid Ux500 debug UART
#endif

/*
 * DEBUG_LL only works if only one SOC is built in. We don't use #else below
 * in order to get "__UX500_UART redefined" warnings if more than one SOC is
 * built, so that there's some hint during the build that something is wrong.
 */

#ifdef CONFIG_UX500_SOC_DB8500
#define U8500_UART0_PHYS_BASE	(0x80120000)
#define U8500_UART1_PHYS_BASE	(0x80121000)
#define U8500_UART2_PHYS_BASE	(0x80007000)
#define __UX500_PHYS_UART(n)	U8500_UART##n##_PHYS_BASE
#endif

#if !defined(__UX500_PHYS_UART)
#error Unknown SOC
#endif

#define UX500_PHYS_UART(n)	__UX500_PHYS_UART(n)
#define UART_PHYS_BASE		UX500_PHYS_UART(CONFIG_UX500_DEBUG_UART)
#define UART_VIRT_BASE		(0xfff07000)

		.macro	addruart, rp, rv, tmp
		ldr	\rp, =UART_PHYS_BASE	@ no, physical address
		ldr	\rv, =UART_VIRT_BASE	@ yes, virtual address
		.endm

#include <debug/pl01x.S>
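The __UX500_PHYS_UART/UX500_PHYS_UART pair is the classic two-level token-pasting idiom: the extra level of indirection forces CONFIG_UX500_DEBUG_UART to expand to its numeric value before ## pastes it into the symbol name. A runnable C illustration (the hard-coded CONFIG value is mine, for demonstration):

#include <stdio.h>

#define U8500_UART0_PHYS_BASE	0x80120000
#define U8500_UART1_PHYS_BASE	0x80121000
#define U8500_UART2_PHYS_BASE	0x80007000
#define __UX500_PHYS_UART(n)	U8500_UART##n##_PHYS_BASE
#define UX500_PHYS_UART(n)	__UX500_PHYS_UART(n)	/* expands n first */

#define CONFIG_UX500_DEBUG_UART	2	/* stand-in for the Kconfig value */

int main(void)
{
	/* Without the two-level indirection, ## would paste the literal
	 * token "CONFIG_UX500_DEBUG_UART" instead of its value. */
	printf("0x%08x\n", UX500_PHYS_UART(CONFIG_UX500_DEBUG_UART));
	return 0;
}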
aixcc-public/challenge-001-exemplar-source
1,166
arch/arm/include/debug/exynos.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

/* pull in the relevant register and map files. */

#define S3C_ADDR_BASE	0xF6000000
#define S3C_VA_UART	S3C_ADDR_BASE + 0x01000000
#define EXYNOS4_PA_UART	0x13800000
#define EXYNOS5_PA_UART	0x12C00000

/* note, for the boot process to work we have to keep the UART
 * virtual address aligned to a 1MiB boundary for the L1
 * mapping the head code makes. We keep the UART virtual address
 * aligned and add in the offset when we load the value here.
 */

	.macro	addruart, rp, rv, tmp
	mrc	p15, 0, \tmp, c0, c0, 0
	and	\tmp, \tmp, #0xf0
	teq	\tmp, #0xf0		@@ A15
	beq	100f
	mrc	p15, 0, \tmp, c0, c0, 5
	and	\tmp, \tmp, #0xf00
	teq	\tmp, #0x100		@@ A15 + A7 but boot to A7
100:	ldreq	\rp, =EXYNOS5_PA_UART
	movne	\rp, #EXYNOS4_PA_UART	@@ EXYNOS4
	ldr	\rv, =S3C_VA_UART
#if CONFIG_DEBUG_S3C_UART != 0
	add	\rp, \rp, #(0x10000 * CONFIG_DEBUG_S3C_UART)
	add	\rv, \rv, #(0x10000 * CONFIG_DEBUG_S3C_UART)
#endif
	.endm

#define fifo_full  fifo_full_s5pv210
#define fifo_level fifo_level_s5pv210

#include <debug/samsung.S>
aixcc-public/challenge-001-exemplar-source
6,500
arch/arm/include/debug/tegra.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2010,2011 Google, Inc.
 * Copyright (C) 2011-2012 NVIDIA CORPORATION. All Rights Reserved.
 *
 * Author:
 *	Colin Cross <ccross@google.com>
 *	Erik Gilling <konkers@google.com>
 *	Doug Anderson <dianders@chromium.org>
 *	Stephen Warren <swarren@nvidia.com>
 *
 * Portions based on mach-omap2's debug-macro.S
 * Copyright (C) 1994-1999 Russell King
 */

#include <linux/serial_reg.h>

#define UART_SHIFT 2

/* Physical addresses */
#define TEGRA_CLK_RESET_BASE	0x60006000
#define TEGRA_APB_MISC_BASE	0x70000000
#define TEGRA_UARTA_BASE	0x70006000
#define TEGRA_UARTB_BASE	0x70006040
#define TEGRA_UARTC_BASE	0x70006200
#define TEGRA_UARTD_BASE	0x70006300
#define TEGRA_UARTE_BASE	0x70006400
#define TEGRA_PMC_BASE		0x7000e400

#define TEGRA_CLK_RST_DEVICES_L	(TEGRA_CLK_RESET_BASE + 0x04)
#define TEGRA_CLK_RST_DEVICES_H	(TEGRA_CLK_RESET_BASE + 0x08)
#define TEGRA_CLK_RST_DEVICES_U	(TEGRA_CLK_RESET_BASE + 0x0c)
#define TEGRA_CLK_OUT_ENB_L	(TEGRA_CLK_RESET_BASE + 0x10)
#define TEGRA_CLK_OUT_ENB_H	(TEGRA_CLK_RESET_BASE + 0x14)
#define TEGRA_CLK_OUT_ENB_U	(TEGRA_CLK_RESET_BASE + 0x18)
#define TEGRA_PMC_SCRATCH20	(TEGRA_PMC_BASE + 0xa0)
#define TEGRA_APB_MISC_GP_HIDREV	(TEGRA_APB_MISC_BASE + 0x804)

/*
 * Must be section-aligned since a section mapping is used early on.
 * Must not overlap with regions in mach-tegra/io.c:tegra_io_desc[].
 */
#define UART_VIRTUAL_BASE	0xfe800000

#define checkuart(rp, rv, lhu, bit, uart) \
	/* Load address of CLK_RST register */ \
	ldr	rp, =TEGRA_CLK_RST_DEVICES_##lhu ; \
	/* Load value from CLK_RST register */ \
	ldr	rp, [rp, #0] ; \
	/* Test UART's reset bit */ \
	tst	rp, #(1 << bit) ; \
	/* If set, can't use UART; jump to save no UART */ \
	bne	90f ; \
	/* Load address of CLK_OUT_ENB register */ \
	ldr	rp, =TEGRA_CLK_OUT_ENB_##lhu ; \
	/* Load value from CLK_OUT_ENB register */ \
	ldr	rp, [rp, #0] ; \
	/* Test UART's clock enable bit */ \
	tst	rp, #(1 << bit) ; \
	/* If clear, can't use UART; jump to save no UART */ \
	beq	90f ; \
	/* Passed all tests, load address of UART registers */ \
	ldr	rp, =TEGRA_UART##uart##_BASE ; \
	/* Jump to save UART address */ \
	b	91f

		.macro	addruart, rp, rv, tmp
		adr	\rp, 99f		@ actual addr of 99f
		ldr	\rv, [\rp]		@ linked addr is stored there
		sub	\rv, \rv, \rp		@ offset between the two
		ldr	\rp, [\rp, #4]		@ linked tegra_uart_config
		sub	\tmp, \rp, \rv		@ actual tegra_uart_config
		ldr	\rp, [\tmp]		@ Load tegra_uart_config
		cmp	\rp, #1			@ needs initialization?
		bne	100f			@ no; go load the addresses
		mov	\rv, #0			@ yes; record init is done
		str	\rv, [\tmp]

#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA
		/* Check ODMDATA */
10:		ldr	\rp, =TEGRA_PMC_SCRATCH20
		ldr	\rp, [\rp, #0]		@ Load PMC_SCRATCH20
		lsr	\rv, \rp, #18		@ 19:18 are console type
		and	\rv, \rv, #3
		cmp	\rv, #2			@ 2 and 3 mean DCC, UART
		beq	11f			@ some boards swap the meaning
		cmp	\rv, #3			@ so accept either
		bne	90f
11:		lsr	\rv, \rp, #15		@ 17:15 are UART ID
		and	\rv, #7
		cmp	\rv, #0			@ UART 0?
		beq	20f
		cmp	\rv, #1			@ UART 1?
		beq	21f
		cmp	\rv, #2			@ UART 2?
		beq	22f
		cmp	\rv, #3			@ UART 3?
		beq	23f
		cmp	\rv, #4			@ UART 4?
		beq	24f
		b	90f			@ invalid
#endif

#if defined(CONFIG_TEGRA_DEBUG_UARTA) || \
    defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
		/* Check UART A validity */
20:		checkuart(\rp, \rv, L, 6, A)
#endif

#if defined(CONFIG_TEGRA_DEBUG_UARTB) || \
    defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
		/* Check UART B validity */
21:		checkuart(\rp, \rv, L, 7, B)
#endif

#if defined(CONFIG_TEGRA_DEBUG_UARTC) || \
    defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
		/* Check UART C validity */
22:		checkuart(\rp, \rv, H, 23, C)
#endif

#if defined(CONFIG_TEGRA_DEBUG_UARTD) || \
    defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
		/* Check UART D validity */
23:		checkuart(\rp, \rv, U, 1, D)
#endif

#if defined(CONFIG_TEGRA_DEBUG_UARTE) || \
    defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
		/* Check UART E validity */
24:		checkuart(\rp, \rv, U, 2, E)
#endif

		/* No valid UART found */
90:		mov	\rp, #0
		/* fall through */

		/* Record whichever UART we chose */
91:		str	\rp, [\tmp, #4]		@ Store in tegra_uart_phys
		cmp	\rp, #0			@ Valid UART address?
		bne	92f			@ Yes, go process it
		str	\rp, [\tmp, #8]		@ Store 0 in tegra_uart_virt
		b	100f			@ Done
92:		and	\rv, \rp, #0xffffff	@ offset within 1MB section
		add	\rv, \rv, #UART_VIRTUAL_BASE
		str	\rv, [\tmp, #8]		@ Store in tegra_uart_virt
		b	100f

		.align
99:		.word	.
#if defined(ZIMAGE)
		.word	. + 4
/*
 * Storage for the state maintained by the macro.
 *
 * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
 * That's because this header is included from multiple files, and we only
 * want a single copy of the data. In particular, the UART probing code above
 * assumes it's running using physical addresses. This is true when this file
 * is included from head.o, but not when included from debug.o. So we need
 * to share the probe results between the two copies, rather than having
 * to re-run the probing again later.
 *
 * In the decompressor, we put the storage right here, since common.c
 * isn't included in the decompressor build. This storage data gets put in
 * .text even though it's really data, since .data is discarded from the
 * decompressor. Luckily, .text is writeable in the decompressor, unless
 * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
 */
		/* Debug UART initialization required */
		.word	1
		/* Debug UART physical address */
		.word	0
		/* Debug UART virtual address */
		.word	0
#else
		.word	tegra_uart_config
#endif
		.ltorg

		/* Load previously selected UART address */
100:		ldr	\rp, [\tmp, #4]		@ Load tegra_uart_phys
		ldr	\rv, [\tmp, #8]		@ Load tegra_uart_virt
		.endm

/*
 * Code below is swiped from <asm/hardware/debug-8250.S>, but adds an extra
 * check to make sure that the UART address is actually valid.
 */

		.macro	senduart, rd, rx
		cmp	\rx, #0
		strbne	\rd, [\rx, #UART_TX << UART_SHIFT]
1001:
		.endm

		.macro	busyuart, rd, rx
		cmp	\rx, #0
		beq	1002f
1001:		ldrb	\rd, [\rx, #UART_LSR << UART_SHIFT]
		and	\rd, \rd, #UART_LSR_THRE
		teq	\rd, #UART_LSR_THRE
		bne	1001b
1002:
		.endm

		.macro	waituartcts, rd, rx
		cmp	\rx, #0
		beq	1002f
1001:		ldrb	\rd, [\rx, #UART_MSR << UART_SHIFT]
		tst	\rd, #UART_MSR_CTS
		beq	1001b
1002:
		.endm

		.macro	waituarttxrdy, rd, rx
		.endm
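A small host-side C sketch of the ODMDATA decoding performed in the CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA path above: bits 19:18 hold the console type and bits 17:15 the UART ID. The sample value is made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned odmdata = 0x80090000u;		/* hypothetical PMC_SCRATCH20 */
	unsigned console = (odmdata >> 18) & 3;	/* 2 or 3 -> DCC/UART console */
	unsigned uart_id = (odmdata >> 15) & 7;	/* 0..4 -> UART A..E */

	printf("console type %u, UART %c\n", console, 'A' + (int)uart_id);
	return 0;
}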
aixcc-public/challenge-001-exemplar-source
1,216
arch/arm/include/debug/vexpress.S
/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/mach-realview/include/mach/debug-macro.S
 *
 * Debugging macro include header
 *
 * Copyright (C) 1994-1999 Russell King
 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
 */

#define DEBUG_LL_PHYS_BASE		0x10000000
#define DEBUG_LL_UART_OFFSET		0x00009000

#define DEBUG_LL_PHYS_BASE_RS1		0x1c000000
#define DEBUG_LL_UART_OFFSET_RS1	0x00090000

#define DEBUG_LL_UART_PHYS_CRX		0xb0090000

#define DEBUG_LL_VIRT_BASE		0xf8000000

#if defined(CONFIG_DEBUG_VEXPRESS_UART0_DETECT)

		.macro	addruart,rp,rv,tmp
		.arch	armv7-a

		@ Make an educated guess regarding the memory map:
		@ - the original A9 core tile (based on ARM Cortex-A9 r0p1)
		@   should use UART at 0x10009000
		@ - all other (RS1 compliant) tiles use UART mapped
		@   at 0x1c090000
		mrc	p15, 0, \rp, c0, c0, 0
		movw	\rv, #0xc091
		movt	\rv, #0x410f
		cmp	\rp, \rv

		@ Original memory map
		moveq	\rp, #DEBUG_LL_UART_OFFSET
		orreq	\rv, \rp, #DEBUG_LL_VIRT_BASE
		orreq	\rp, \rp, #DEBUG_LL_PHYS_BASE

		@ RS1 memory map
		movne	\rp, #DEBUG_LL_UART_OFFSET_RS1
		orrne	\rv, \rp, #DEBUG_LL_VIRT_BASE
		orrne	\rp, \rp, #DEBUG_LL_PHYS_BASE_RS1

		.endm

#include <debug/pl01x.S>

#endif
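The movw/movt pair above materialises 0x410fc091, the MIDR value of the original Cortex-A9 r0p1 core tile, for the comparison. A C sketch decoding the standard MIDR fields of that value (field layout per the ARM architecture manual; the decode itself is mine):

#include <stdio.h>

int main(void)
{
	unsigned midr = 0x410fc091u;	/* value built by movw/movt above */

	printf("implementer 0x%02x, part 0x%03x, r%up%u\n",
	       midr >> 24,		/* 0x41 = 'A' = ARM Ltd. */
	       (midr >> 4) & 0xfff,	/* 0xC09 = Cortex-A9 */
	       (midr >> 20) & 0xf,	/* variant: r0 */
	       midr & 0xf);		/* revision: p1 */
	return 0;
}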
aixcc-public/challenge-001-exemplar-source
3,677
arch/arm64/xen/hypercall.S
/******************************************************************************
 * hypercall.S
 *
 * Xen hypercall wrappers
 *
 * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/*
 * The Xen hypercall calling convention is very similar to the procedure
 * call standard for the ARM 64-bit architecture: the first parameter is
 * passed in x0, the second in x1, the third in x2, the fourth in x3 and
 * the fifth in x4.
 *
 * The hypercall number is passed in x16.
 *
 * The return value is in x0.
 *
 * The hvc ISS must be 0xEA1, the Xen-specific ARM hypercall tag.
 *
 * Parameter structs passed to hypercalls are laid out according to
 * the ARM 64-bit EABI standard.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-uaccess.h>
#include <xen/interface/xen.h>

#define XEN_IMM 0xEA1

#define HYPERCALL_SIMPLE(hypercall)		\
SYM_FUNC_START(HYPERVISOR_##hypercall)		\
	mov x16, #__HYPERVISOR_##hypercall;	\
	hvc XEN_IMM;				\
	ret;					\
SYM_FUNC_END(HYPERVISOR_##hypercall)

#define HYPERCALL0 HYPERCALL_SIMPLE
#define HYPERCALL1 HYPERCALL_SIMPLE
#define HYPERCALL2 HYPERCALL_SIMPLE
#define HYPERCALL3 HYPERCALL_SIMPLE
#define HYPERCALL4 HYPERCALL_SIMPLE
#define HYPERCALL5 HYPERCALL_SIMPLE

	.text

HYPERCALL2(xen_version);
HYPERCALL3(console_io);
HYPERCALL3(grant_table_op);
HYPERCALL2(sched_op);
HYPERCALL2(event_channel_op);
HYPERCALL2(hvm_op);
HYPERCALL2(memory_op);
HYPERCALL2(physdev_op);
HYPERCALL3(vcpu_op);
HYPERCALL1(platform_op_raw);
HYPERCALL2(multicall);
HYPERCALL2(vm_assist);
HYPERCALL3(dm_op);

SYM_FUNC_START(privcmd_call)
	mov x16, x0
	mov x0, x1
	mov x1, x2
	mov x2, x3
	mov x3, x4
	mov x4, x5
	/*
	 * Privcmd calls are issued by userspace. The kernel needs to
	 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
	 * translations to user memory via AT instructions. Since AT
	 * instructions are not affected by the PAN bit (ARMv8.1), we only
	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
	 * is enabled (it implies that hardware UAO and PAN are disabled).
	 */
	uaccess_ttbr0_enable x6, x7, x8
	hvc XEN_IMM
	/*
	 * Disable userspace access from kernel once the hyp call has
	 * completed.
	 */
	uaccess_ttbr0_disable x6, x7
	ret
SYM_FUNC_END(privcmd_call);
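Based on the calling convention described in the header comment, a hypothetical GCC-style C wrapper for a two-argument hypercall might look as follows. This is a sketch only (arm64-only inline asm, not the kernel's actual wrapper, which is the assembly above):

static inline long xen_hypercall2(unsigned long nr,
				  unsigned long a1, unsigned long a2)
{
	/* Per the convention above: number in x16, args in x0/x1,
	 * "hvc" with the Xen tag 0xEA1, result back in x0. */
	register unsigned long x16 asm("x16") = nr;
	register unsigned long x0 asm("x0") = a1;
	register unsigned long x1 asm("x1") = a2;

	asm volatile("hvc #0xEA1"
		     : "+r" (x0)
		     : "r" (x16), "r" (x1)
		     : "memory");
	return (long)x0;
}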
aixcc-public/challenge-001-exemplar-source
24,130
arch/arm64/kernel/head.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/el2_setup.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	efi_signature_nop			// special NOP to identify as PE/COFF executable
	b	primary_entry			// branch to kernel start, magic
	.quad	0				// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
	.long	.Lpe_header_offset		// Offset to the PE header.

	__EFI_PE_HEADER

	__INIT

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                                    Purpose
	 *  x20        primary_entry() .. __primary_switch()    CPU boot mode
	 *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
	 *  x22        create_idmap() .. start_kernel()         ID map VA of the DT blob
	 *  x23        primary_entry() .. start_kernel()        physical misalignment/KASLR offset
	 *  x24        __primary_switch()                       linear map KASLR seed
	 *  x25        primary_entry() .. start_kernel()        supported VA size
	 *  x28        create_idmap()                           callee preserved temp register
	 */
SYM_CODE_START(primary_entry)
	bl	preserve_boot_args
	bl	init_kernel_el			// w0=cpu_boot_mode
	mov	x20, x0
	bl	create_idmap

	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
#if VA_BITS > 48
	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	tst	x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
	mov	x0, #VA_BITS
	mov	x25, #VA_BITS_MIN
	csel	x25, x25, x0, eq
	mov	x0, x25
#endif
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)

	/*
	 * Preserve the arguments passed by the bootloader in x0 .. x3
	 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	dcache_inval_poc		// tail call
SYM_CODE_END(preserve_boot_args)

SYM_FUNC_START_LOCAL(clear_page_tables)
	/*
	 * Clear the init page tables.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x2, x1, x0
	mov	x1, xzr
	b	__pi_memset			// tail call
SYM_FUNC_END(clear_page_tables)

/*
 * Macro to populate page table entries, these entries can be pointers to the next level
 * or last level entries pointing to physical memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to page table or physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags for pagetable entry to or in
 *	inc:	increment to rtbl between each entry
 *	tmp1:	temporary variable
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 * Returns:	rtbl
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags	// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc	// rtbl = pa next level
	add	\index, \index, #1
	cmp	\index, \eindex
	b.ls	.Lpe\@
	.endm

/*
 * Compute indices of table entries from virtual address range. If multiple entries
 * were needed in the previous page table level then the next page table level is assumed
 * to be composed of multiple pages. (This effectively scales the end index).
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range - we map [vstart, vend]
 *	shift:	shift used to transform virtual address into index
 *	order:  #imm 2log(number of entries in page table)
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	On entry: how many extra entries were required in previous level, scales
 *		  our end index.
 *		On exit: returns how many extra entries required for next page table level
 *
 * Preserves:	vstart, vend
 * Returns:	istart, iend, count
 */
	.macro compute_indices, vstart, vend, shift, order, istart, iend, count
	ubfx	\istart, \vstart, \shift, \order
	ubfx	\iend, \vend, \shift, \order
	add	\iend, \iend, \count, lsl \order
	sub	\count, \iend, \istart
	.endm

/*
 * Map memory for specified virtual address range. Each level of page table needed supports
 * multiple entries. If a level requires n entries the next page table level is assumed to be
 * formed from n pages.
 *
 *	tbl:	location of page table
 *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range - we map [vstart, vend - 1]
 *	flags:	flags to use to map last level entries
 *	phys:	physical address corresponding to vstart - physical memory is contiguous
 *	order:  #imm 2log(number of entries in PGD table)
 *
 * If extra_shift is set, an extra level will be populated if the end address does
 * not fit in 'extra_shift' bits. This assumes vend is in the TTBR0 range.
 *
 * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
 * Preserves:	vstart, flags
 * Corrupts:	tbl, rtbl, vend, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, order, istart, iend, tmp, count, sv, extra_shift
	sub \vend, \vend, #1
	add \rtbl, \tbl, #PAGE_SIZE
	mov \count, #0

	.ifnb	\extra_shift
	tst	\vend, #~((1 << (\extra_shift)) - 1)
	b.eq	.L_\@
	compute_indices \vstart, \vend, #\extra_shift, #(PAGE_SHIFT - 3), \istart, \iend, \count
	mov \sv, \rtbl
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	.endif
.L_\@:
	compute_indices \vstart, \vend, #PGDIR_SHIFT, #\order, \istart, \iend, \count
	mov \sv, \rtbl
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv

#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
	mov \sv, \rtbl
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
	mov \sv, \rtbl
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
#endif

	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
	bic \rtbl, \phys, #SWAPPER_BLOCK_SIZE - 1
	populate_entries \tbl, \rtbl, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
	.endm

/*
 * Remap a subregion created with the map_memory macro with modified attributes
 * or output address. The entire remapped region must have been covered in the
 * invocation of map_memory.
 *
 * x0: last level table address (returned in first argument to map_memory)
 * x1: start VA of the existing mapping
 * x2: start VA of the region to update
 * x3: end VA of the region to update (exclusive)
 * x4: start PA associated with the region to update
 * x5: attributes to set on the updated region
 * x6: order of the last level mappings
 */
SYM_FUNC_START_LOCAL(remap_region)
	sub	x3, x3, #1		// make end inclusive

	// Get the index offset for the start of the last level table
	lsr	x1, x1, x6
	bfi	x1, xzr, #0, #PAGE_SHIFT - 3

	// Derive the start and end indexes into the last level table
	// associated with the provided region
	lsr	x2, x2, x6
	lsr	x3, x3, x6
	sub	x2, x2, x1
	sub	x3, x3, x1

	mov	x1, #1
	lsl	x6, x1, x6		// block size at this level

	populate_entries x0, x4, x2, x3, x5, x6, x7
	ret
SYM_FUNC_END(remap_region)

SYM_FUNC_START_LOCAL(create_idmap)
	mov	x28, lr
	/*
	 * The ID map carries a 1:1 mapping of the physical address range
	 * covered by the loaded image, which could be anywhere in DRAM. This
	 * means that the required size of the VA (== PA) space is decided at
	 * boot time, and could be more than the configured size of the VA
	 * space for ordinary kernel and user space mappings.
	 *
	 * There are three cases to consider here:
	 * - 39 <= VA_BITS < 48, and the ID map needs up to 48 VA bits to cover
	 *   the placement of the image. In this case, we configure one extra
	 *   level of translation on the fly for the ID map only. (This case
	 *   also covers 42-bit VA/52-bit PA on 64k pages).
	 *
	 * - VA_BITS == 48, and the ID map needs more than 48 VA bits. This can
	 *   only happen when using 64k pages, in which case we need to extend
	 *   the root level table rather than add a level. Note that we can
	 *   treat this case as 'always extended' as long as we take care not
	 *   to program an unsupported T0SZ value into the TCR register.
	 *
	 * - Combinations that would require two additional levels of
	 *   translation are not supported, e.g., VA_BITS==36 on 16k pages, or
	 *   VA_BITS==39/4k pages with 5-level paging, where the input address
	 *   requires more than 47 or 48 bits, respectively.
	 */
#if (VA_BITS < 48)
#define IDMAP_PGD_ORDER	(VA_BITS - PGDIR_SHIFT)
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)

	/*
	 * If VA_BITS < 48, we have to configure an additional table level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif
#else
#define IDMAP_PGD_ORDER	(PHYS_MASK_SHIFT - PGDIR_SHIFT)
#define EXTRA_SHIFT
	/*
	 * If VA_BITS == 48, we don't have to configure an additional
	 * translation level, but the top-level table has more entries.
	 */
#endif
	adrp	x0, init_idmap_pg_dir
	adrp	x3, _text
	adrp	x6, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE
	mov	x7, SWAPPER_RX_MMUFLAGS

	map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT

	/* Remap the kernel page tables r/w in the ID map */
	adrp	x1, _text
	adrp	x2, init_pg_dir
	adrp	x3, init_pg_end
	bic	x4, x2, #SWAPPER_BLOCK_SIZE - 1
	mov	x5, SWAPPER_RW_MMUFLAGS
	mov	x6, #SWAPPER_BLOCK_SHIFT
	bl	remap_region

	/* Remap the FDT after the kernel image */
	adrp	x1, _text
	adrp	x22, _end + SWAPPER_BLOCK_SIZE
	bic	x2, x22, #SWAPPER_BLOCK_SIZE - 1
	bfi	x22, x21, #0, #SWAPPER_BLOCK_SHIFT	// remapped FDT address
	add	x3, x2, #MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE
	bic	x4, x21, #SWAPPER_BLOCK_SIZE - 1
	mov	x5, SWAPPER_RW_MMUFLAGS
	mov	x6, #SWAPPER_BLOCK_SHIFT
	bl	remap_region

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	dmb	sy

	adrp	x0, init_idmap_pg_dir
	adrp	x1, init_idmap_pg_end
	bl	dcache_inval_poc
	ret	x28
SYM_FUNC_END(create_idmap)

SYM_FUNC_START_LOCAL(create_kernel_mapping)
	adrp	x0, init_pg_dir
	mov_q	x5, KIMAGE_VADDR		// compile time __va(_text)
#ifdef CONFIG_RELOCATABLE
	add	x5, x5, x23			// add KASLR displacement
#endif
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)
	mov	x7, SWAPPER_RW_MMUFLAGS

	map_memory x0, x1, x5, x6, x7, x3, (VA_BITS - PGDIR_SHIFT), x10, x11, x12, x13, x14

	dsb	ishst				// sync with page table walker
	ret
SYM_FUNC_END(create_kernel_mapping)

	/*
	 * Initialize CPU registers with task-specific and cpu-specific context.
	 *
	 * Create a final frame record at task_pt_regs(current)->stackframe, so
	 * that the unwinder can identify the final frame record of any task by
	 * its location in the task stack. We reserve the entire pt_regs space
	 * for consistency with user tasks and kthreads.
	 */
	.macro	init_cpu_task tsk, tmp1, tmp2
	msr	sp_el0, \tsk

	ldr	\tmp1, [\tsk, #TSK_STACK]
	add	sp, \tmp1, #THREAD_SIZE
	sub	sp, sp, #PT_REGS_SIZE

	stp	xzr, xzr, [sp, #S_STACKFRAME]
	add	x29, sp, #S_STACKFRAME

	scs_load_current

	adr_l	\tmp1, __per_cpu_offset
	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
	ldr	\tmp1, [\tmp1, \tmp2, lsl #3]
	set_this_cpu_offset \tmp1
	.endm

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __pa(KERNEL_START)
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adr_l	x4, init_task
	init_cpu_task x4, x5, x6

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x0, x20
	bl	set_cpu_boot_mode_flag

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#if VA_BITS > 48
	adr_l	x8, vabits_actual		// Set this early so KASAN early init
	str	x25, [x8]			// ... observes the correct value
	dc	civac, x8			// Make visible to booting secondaries
#endif

#ifdef CONFIG_RANDOMIZE_BASE
	adrp	x5, memstart_offset_seed	// Save KASLR linear map seed
	strh	w24, [x5, :lo12:memstart_offset_seed]
#endif
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x21				// pass FDT address in x0
	bl	early_fdt_map			// Try mapping the FDT early
	mov	x0, x20				// pass the full boot status
	bl	init_feature_override		// Parse cpu feature overrides
	mov	x0, x20
	bl	finalise_el2			// Prefer VHE if possible
	ldp	x29, x30, [sp], #16
	bl	start_kernel
	ASM_BUG()
SYM_FUNC_END(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","awx"

/*
 * Starting from EL2 or EL1, configure the CPU to execute at the highest
 * reachable EL supported by the kernel in a chosen default state. If dropping
 * from EL2 to EL1, configure EL2 before configuring EL1.
 *
 * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
 * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
 * booted in EL1 or EL2 respectively, with the top 32 bits containing
 * potential context flags. These flags are *not* stored in __boot_cpu_mode.
 */
SYM_FUNC_START(init_kernel_el)
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	msr	sctlr_el1, x0
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr
	mov	w0, #BOOT_CPU_MODE_EL1
	eret

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	mov_q	x0, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x0
	isb

	init_el2_state

	/* Hypervisor stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb

	mov_q	x1, INIT_SCTLR_EL1_MMU_OFF

	/*
	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
	 * making it impossible to start in nVHE mode. Is that
	 * compliant with the architecture? Absolutely not!
	 */
	mrs	x0, hcr_el2
	and	x0, x0, #HCR_E2H
	cbz	x0, 1f

	/* Set a sane SCTLR_EL1, the VHE way */
	msr_s	SYS_SCTLR_EL12, x1
	mov	x2, #BOOT_CPU_FLAG_E2H
	b	2f

1:	msr	sctlr_el1, x1
	mov	x2, xzr
2:	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2
	orr	x0, x0, x2
	eret
SYM_FUNC_END(init_kernel_el)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Save CPU boot mode
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)

/*
 * This provides a "holding pen" in which platforms hold all secondary
 * cores until we're ready for them to initialise.
 */
SYM_FUNC_START(secondary_holding_pen)
	bl	init_kernel_el			// w0=cpu_boot_mode
	mrs	x2, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x2, x2, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x2
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
SYM_FUNC_START(secondary_entry)
	bl	init_kernel_el			// w0=cpu_boot_mode
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mov	x20, x0				// preserve boot mode
	bl	finalise_el2
	bl	__cpu_secondary_check52bitva
#if VA_BITS > 48
	ldr_l	x0, vabits_actual
#endif
	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	adrp	x2, idmap_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

SYM_FUNC_START_LOCAL(__secondary_switched)
	mov	x0, x20
	bl	set_cpu_boot_mode_flag
	str_l	xzr, __early_cpu_boot_status, x3
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow

	init_cpu_task x2, x1, x3

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	bl	secondary_start_kernel
	ASM_BUG()
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status tmp, status
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */
	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *  x2  = ID map root table address
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
SYM_FUNC_START(__enable_mmu)
	mrs	x3, ID_AA64MMFR0_EL1
	ubfx	x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
	b.lt	__no_granule_support
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
	b.gt	__no_granule_support
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	load_ttbr1 x1, x1, x3
	set_sctlr_el1	x0
	ret
SYM_FUNC_END(__enable_mmu)

SYM_FUNC_START(__cpu_secondary_check52bitva)
#if VA_BITS > 48
	ldr_l	x0, vabits_actual
	cmp	x0, #52
	b.ne	2f

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
	cbnz	x0, 2f

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

#endif
2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
SYM_FUNC_START_LOCAL(__relocate_kernel)
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	adr_l	x9, __rela_start
	adr_l	x10, __rela_end
	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset

0:	cmp	x9, x10
	b.hs	1f
	ldp	x12, x13, [x9], #24
	ldr	x14, [x9, #-8]
	cmp	w13, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x14, x14, x23			// relocate
	str	x14, [x12, x23]
	b	0b

1:
#ifdef CONFIG_RELR
	/*
	 * Apply RELR relocations.
	 *
	 * RELR is a compressed format for storing relative relocations. The
	 * encoded sequence of entries looks like:
	 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
	 *
	 * i.e. start with an address, followed by any number of bitmaps. The
	 * address entry encodes 1 relocation. The subsequent bitmap entries
	 * encode up to 63 relocations each, at subsequent offsets following
	 * the last address entry.
	 *
	 * The bitmap entries must have 1 in the least significant bit. The
	 * assumption here is that an address cannot have 1 in lsb. Odd
	 * addresses are not supported. Any odd addresses are stored in the RELA
	 * section, which is handled above.
	 *
	 * Excluding the least significant bit in the bitmap, each non-zero
	 * bit in the bitmap represents a relocation to be applied to
	 * a corresponding machine word that follows the base address
	 * word. The second least significant bit represents the machine
	 * word immediately following the initial address, and each bit
	 * that follows represents the next word, in linear order. As such,
	 * a single bitmap can encode up to 63 relocations in a 64-bit object.
	 *
	 * In this implementation we store the address of the next RELR table
	 * entry in x9, the address being relocated by the current address or
	 * bitmap entry in x13 and the address being relocated by the current
	 * bit in x14.
	 */
	adr_l	x9, __relr_start
	adr_l	x10, __relr_end

2:	cmp	x9, x10
	b.hs	7f
	ldr	x11, [x9], #8
	tbnz	x11, #0, 3f			// branch to handle bitmaps
	add	x13, x11, x23
	ldr	x12, [x13]			// relocate address entry
	add	x12, x12, x23
	str	x12, [x13], #8			// adjust to start of bitmap
	b	2b

3:	mov	x14, x13
4:	lsr	x11, x11, #1
	cbz	x11, 6f
	tbz	x11, #0, 5f			// skip bit if not set
	ldr	x12, [x14]			// relocate bit
	add	x12, x12, x23
	str	x12, [x14]

5:	add	x14, x14, #8			// move to next bit's address
	b	4b

6:	/*
	 * Move to the next bitmap's address. 8 is the word size, and 63 is the
	 * number of significant bits in a bitmap entry.
	 */
	add	x13, x13, #(8 * 63)
	b	2b

7:
#endif
	ret

SYM_FUNC_END(__relocate_kernel)
#endif

SYM_FUNC_START_LOCAL(__primary_switch)
	adrp	x1, reserved_pg_dir
	adrp	x2, init_idmap_pg_dir
	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
	adrp	x23, KERNEL_START
	and	x23, x23, MIN_KIMG_ALIGN - 1
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x0, x22
	adrp	x1, init_pg_end
	mov	sp, x1
	mov	x29, xzr
	bl	__pi_kaslr_early_init
	and	x24, x0, #SZ_2M - 1		// capture memstart offset seed
	bic	x0, x0, #SZ_2M - 1
	orr	x23, x23, x0			// record kernel offset
#endif
#endif
	bl	clear_page_tables
	bl	create_kernel_mapping

	adrp	x1, init_pg_dir
	load_ttbr1 x1, x1, x2
#ifdef CONFIG_RELOCATABLE
	bl	__relocate_kernel
#endif
	ldr	x8, =__primary_switched
	adrp	x0, KERNEL_START		// __pa(KERNEL_START)
	br	x8
SYM_FUNC_END(__primary_switch)
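The RELR walk in __relocate_kernel can be mirrored in C. Below is a host-side sketch that follows the bitmap semantics described in the comment above; instead of patching memory it only prints the offsets a real relocator would adjust, over a sample table of my own.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Even entries are addresses (one relocation each); odd entries are
 * bitmaps covering up to 63 words after the last address entry. */
static void walk_relr(const uint64_t *relr, size_t count)
{
	uint64_t where = 0;
	for (size_t i = 0; i < count; i++) {
		uint64_t entry = relr[i];
		if ((entry & 1) == 0) {		/* address entry */
			where = entry;
			printf("reloc at 0x%llx\n", (unsigned long long)where);
			where += 8;		/* bitmaps cover the words after it */
		} else {			/* bitmap entry */
			for (int bit = 0; bit < 63; bit++)
				if (entry & (2ull << bit))	/* skip the lsb marker */
					printf("reloc at 0x%llx\n",
					       (unsigned long long)(where + 8ull * bit));
			where += 8 * 63;	/* next bitmap covers the next 63 words */
		}
	}
}

int main(void)
{
	/* Sample: relocate the word at 0x1000, then the 1st and 3rd words after it. */
	const uint64_t sample[] = { 0x1000, (1ull << 1) | (1ull << 3) | 1 };
	walk_relr(sample, 2);
	return 0;
}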
aixcc-public/challenge-001-exemplar-source
5,118
arch/arm64/kernel/efi-header.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 - 2017 Linaro, Ltd.
 * Copyright (C) 2013, 2014 Red Hat, Inc.
 */

#include <linux/pe.h>
#include <linux/sizes.h>

	.macro	efi_signature_nop
#ifdef CONFIG_EFI
.L_head:
	/*
	 * This ccmp instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	ccmp	x18, #0, #0xd, pl
#else
	/*
	 * Bootloaders may inspect the opcode at the start of the kernel
	 * image to decide if the kernel is capable of booting via UEFI.
	 * So put an ordinary NOP here, not the "MZ.." pseudo-nop above.
	 */
	nop
#endif
	.endm

	.macro	__EFI_PE_HEADER
#ifdef CONFIG_EFI
	.set	.Lpe_header_offset, . - .L_head
	.long	PE_MAGIC
	.short	IMAGE_FILE_MACHINE_ARM64		// Machine
	.short	.Lsection_count				// NumberOfSections
	.long	0					// TimeDateStamp
	.long	0					// PointerToSymbolTable
	.long	0					// NumberOfSymbols
	.short	.Lsection_table - .Loptional_header	// SizeOfOptionalHeader
	.short	IMAGE_FILE_DEBUG_STRIPPED | \
		IMAGE_FILE_EXECUTABLE_IMAGE | \
		IMAGE_FILE_LINE_NUMS_STRIPPED		// Characteristics

.Loptional_header:
	.short	PE_OPT_MAGIC_PE32PLUS			// PE32+ format
	.byte	0x02					// MajorLinkerVersion
	.byte	0x14					// MinorLinkerVersion
	.long	__initdata_begin - .Lefi_header_end	// SizeOfCode
	.long	__pecoff_data_size			// SizeOfInitializedData
	.long	0					// SizeOfUninitializedData
	.long	__efistub_efi_pe_entry - .L_head	// AddressOfEntryPoint
	.long	.Lefi_header_end - .L_head		// BaseOfCode

	.quad	0					// ImageBase
	.long	SEGMENT_ALIGN				// SectionAlignment
	.long	PECOFF_FILE_ALIGNMENT			// FileAlignment
	.short	0					// MajorOperatingSystemVersion
	.short	0					// MinorOperatingSystemVersion
	.short	LINUX_EFISTUB_MAJOR_VERSION		// MajorImageVersion
	.short	LINUX_EFISTUB_MINOR_VERSION		// MinorImageVersion
	.short	0					// MajorSubsystemVersion
	.short	0					// MinorSubsystemVersion
	.long	0					// Win32VersionValue

	.long	_end - .L_head				// SizeOfImage

	// Everything before the kernel image is considered part of the header
	.long	.Lefi_header_end - .L_head		// SizeOfHeaders
	.long	0					// CheckSum
	.short	IMAGE_SUBSYSTEM_EFI_APPLICATION		// Subsystem
	.short	IMAGE_DLL_CHARACTERISTICS_NX_COMPAT	// DllCharacteristics
	.quad	0					// SizeOfStackReserve
	.quad	0					// SizeOfStackCommit
	.quad	0					// SizeOfHeapReserve
	.quad	0					// SizeOfHeapCommit
	.long	0					// LoaderFlags
	.long	(.Lsection_table - .) / 8		// NumberOfRvaAndSizes

	.quad	0					// ExportTable
	.quad	0					// ImportTable
	.quad	0					// ResourceTable
	.quad	0					// ExceptionTable
	.quad	0					// CertificationTable
	.quad	0					// BaseRelocationTable

#ifdef CONFIG_DEBUG_EFI
	.long	.Lefi_debug_table - .L_head		// DebugTable
	.long	.Lefi_debug_table_size
#endif

	// Section table
.Lsection_table:
	.ascii	".text\0\0\0"
	.long	__initdata_begin - .Lefi_header_end	// VirtualSize
	.long	.Lefi_header_end - .L_head		// VirtualAddress
	.long	__initdata_begin - .Lefi_header_end	// SizeOfRawData
	.long	.Lefi_header_end - .L_head		// PointerToRawData

	.long	0					// PointerToRelocations
	.long	0					// PointerToLineNumbers
	.short	0					// NumberOfRelocations
	.short	0					// NumberOfLineNumbers
	.long	IMAGE_SCN_CNT_CODE | \
		IMAGE_SCN_MEM_READ | \
		IMAGE_SCN_MEM_EXECUTE			// Characteristics

	.ascii	".data\0\0\0"
	.long	__pecoff_data_size			// VirtualSize
	.long	__initdata_begin - .L_head		// VirtualAddress
	.long	__pecoff_data_rawsize			// SizeOfRawData
	.long	__initdata_begin - .L_head		// PointerToRawData

	.long	0					// PointerToRelocations
	.long	0					// PointerToLineNumbers
	.short	0					// NumberOfRelocations
	.short	0					// NumberOfLineNumbers
	.long	IMAGE_SCN_CNT_INITIALIZED_DATA | \
		IMAGE_SCN_MEM_READ | \
		IMAGE_SCN_MEM_WRITE			// Characteristics

	.set	.Lsection_count, (. - .Lsection_table) / 40

#ifdef CONFIG_DEBUG_EFI
	/*
	 * The debug table is referenced via its Relative Virtual Address (RVA),
	 * which is only defined for those parts of the image that are covered
	 * by a section declaration. Since this header is not covered by any
	 * section, the debug table must be emitted elsewhere. So stick it in
	 * the .init.rodata section instead.
	 *
	 * Note that the EFI debug entry itself may legally have a zero RVA,
	 * which means we can simply put it right after the section headers.
	 */
	__INITRODATA

	.align	2
.Lefi_debug_table:
	// EFI_IMAGE_DEBUG_DIRECTORY_ENTRY
	.long	0					// Characteristics
	.long	0					// TimeDateStamp
	.short	0					// MajorVersion
	.short	0					// MinorVersion
	.long	IMAGE_DEBUG_TYPE_CODEVIEW		// Type
	.long	.Lefi_debug_entry_size			// SizeOfData
	.long	0					// RVA
	.long	.Lefi_debug_entry - .L_head		// FileOffset

	.set	.Lefi_debug_table_size, . - .Lefi_debug_table
	.previous

.Lefi_debug_entry:
	// EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
	.ascii	"NB10"					// Signature
	.long	0					// Unknown
	.long	0					// Unknown2
	.long	0					// Unknown3

	.asciz	VMLINUX_PATH

	.set	.Lefi_debug_entry_size, . - .Lefi_debug_entry
#endif

	.balign	SEGMENT_ALIGN
.Lefi_header_end:
#else
	.set	.Lpe_header_offset, 0x0
#endif
	.endm
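The "MZ" trick in efi_signature_nop can be checked on a host. Assuming 0xfa405a4d is the AArch64 encoding of ccmp x18, #0, #0xd, pl (my reading of the encoding; verify with an assembler), its first two little-endian bytes are exactly the DOS/PE signature UEFI firmware looks for at offset 0:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t insn = 0xfa405a4d;	/* assumed opcode of the ccmp above */
	char sig[2];

	memcpy(sig, &insn, 2);		/* first two bytes as stored in memory */
	printf("%c%c\n", sig[0], sig[1]);	/* prints "MZ" on little-endian hosts */
	return 0;
}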
aixcc-public/challenge-001-exemplar-source
28,951
arch/arm64/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

	.macro	kernel_ventry, el:req, ht:req, regsize:req, label:req
	.align	7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif

	sub	sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\el\ht\()_\regsize\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\el\ht\()_\regsize\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm

	.macro	tramp_alias, dst, sym, tmp
	mov_q	\dst, TRAMP_VALIAS
	adr_l	\tmp, \sym
	add	\dst, \dst, \tmp
	adr_l	\tmp, .entry.tramp.text
	sub	\dst, \dst, \tmp
	.endm

	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm

	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	/*
	 * Asynchronous tag check faults are only possible in ASYNC (2) or
	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
	 * set, so skip the check if it is unset.
	 */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
1:
#endif
	.endm

	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	/* See comment in check_mte_async_tcf above. */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
	.endm

	.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
	msr_s	SYS_GCR_EL1, \tmp
#endif
	.endm

	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
	b	1f
alternative_cb_end
	mov	\tmp, KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, \tmp
1:
#endif
	.endm

	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
	b	1f
alternative_cb_end
	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]

	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	check_mte_async_tcf x22, x23, x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would avoid
	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
	 *
	 * Install the kernel IA key only if IA was enabled in the task. If IA
	 * was disabled on kernel exit then we would have left the kernel IA
	 * installed so there is no need to install it again.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	apply_ssbd 1, x22, x23

	mte_set_kernel_gcr x22, x23

	/*
	 * Any non-self-synchronizing system register updates required for
	 * kernel entry should be placed before this point.
	 */
alternative_if ARM64_MTE
	isb
	b	1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb
alternative_else_nop_endif
1:

	scs_load_current
	.else
	add	x21, sp, #PT_REGS_SIZE
	get_current_task tsk
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * For exceptions from EL0, create a final frame record.
	 * For exceptions from EL1, create a synthetic frame record so the
	 * interrupted code shows up in the backtrace.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif
#endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	mrs_s	x21, SYS_ICC_CTLR_EL1
	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
	dsb	sy				// Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif
#endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk

	/* Ignore asynchronous tag check faults in the uaccess routines */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	mte_set_user_gcr tsk, x0, x1

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x29
	tramp_alias	x30, tramp_exit_native, x29
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat, x29
	br	x30
#endif
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

	eret
	.endif
	sb
	.endm

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)

	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
	kernel_ventry	0, t, 64, error		// Error 64-bit EL0

	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
SYM_CODE_END(vectors)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(__bad_stack)
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */

	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #PT_REGS_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #PT_REGS_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
SYM_CODE_END(__bad_stack)
#endif /* CONFIG_VMAP_STACK */

	.macro entry_handler el:req, ht:req, regsize:req, label:req
SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
	kernel_entry \el, \regsize
	mov	x0, sp
	bl	el\el\ht\()_\regsize\()_\label\()_handler
	.if \el == 0
	b	ret_to_user
	.else
	b	ret_to_kernel
	.endif
SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
	.endm

/*
 * Early exception handlers
 */
	entry_handler	1, t, 64, sync
	entry_handler	1, t, 64, irq
	entry_handler	1, t, 64, fiq
	entry_handler	1, t, 64, error

	entry_handler	1, h, 64, sync
	entry_handler	1, h, 64, irq
	entry_handler	1, h, 64, fiq
	entry_handler	1, h, 64, error

	entry_handler	0, t, 64, sync
	entry_handler	0, t, 64, irq
	entry_handler	0, t, 64, fiq
	entry_handler	0, t, 64, error

	entry_handler	0, t, 32, sync
	entry_handler	0, t, 32, irq
	entry_handler	0, t, 32, fiq
	entry_handler	0, t, 32, error

SYM_CODE_START_LOCAL(ret_to_kernel)
	kernel_exit 1
SYM_CODE_END(ret_to_kernel)

SYM_CODE_START_LOCAL(ret_to_user)
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase_on_task_stack
#endif
	kernel_exit 0
SYM_CODE_END(ret_to_user)

	.popsection				// .entry.text

	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which
requires KPTI to be * disabled. */ .endm .macro tramp_data_read_var dst, var #ifdef CONFIG_RELOCATABLE ldr \dst, .L__tramp_data_\var .ifndef .L__tramp_data_\var .pushsection ".entry.tramp.rodata", "a", %progbits .align 3 .L__tramp_data_\var: .quad \var .popsection .endif #else /* * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a * compile time constant (and hence not secret and not worth hiding). * * As statically allocated kernel code and data always live in the top * 47 bits of the address space we can sign-extend bit 47 and avoid an * instruction to load the upper 16 bits (which must be 0xFFFF). */ movz \dst, :abs_g2_s:\var movk \dst, :abs_g1_nc:\var movk \dst, :abs_g0_nc:\var #endif .endm #define BHB_MITIGATION_NONE 0 #define BHB_MITIGATION_LOOP 1 #define BHB_MITIGATION_FW 2 #define BHB_MITIGATION_INSN 3 .macro tramp_ventry, vector_start, regsize, kpti, bhb .align 7 1: .if \regsize == 64 msr tpidrro_el0, x30 // Restored in kernel_ventry .endif .if \bhb == BHB_MITIGATION_LOOP /* * This sequence must appear before the first indirect branch. i.e. the * ret out of tramp_ventry. It appears here because x30 is free. */ __mitigate_spectre_bhb_loop x30 .endif // \bhb == BHB_MITIGATION_LOOP .if \bhb == BHB_MITIGATION_INSN clearbhb isb .endif // \bhb == BHB_MITIGATION_INSN .if \kpti == 1 /* * Defend against branch aliasing attacks by pushing a dummy * entry onto the return stack and using a RET instruction to * enter the full-fat kernel vectors. */ bl 2f b . 2: tramp_map_kernel x30 alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 tramp_data_read_var x30, vectors alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM prfm plil1strm, [x30, #(1b - \vector_start)] alternative_else_nop_endif msr vbar_el1, x30 isb .else adr_l x30, vectors .endif // \kpti == 1 .if \bhb == BHB_MITIGATION_FW /* * The firmware sequence must appear before the first indirect branch. * i.e. the ret out of tramp_ventry. But it also needs the stack to be * mapped to save/restore the registers the SMC clobbers. */ __mitigate_spectre_bhb_fw .endif // \bhb == BHB_MITIGATION_FW add x30, x30, #(1b - \vector_start + 4) ret .org 1b + 128 // Did we overflow the ventry slot? .endm .macro tramp_exit, regsize = 64 tramp_data_read_var x30, this_cpu_vector get_this_cpu_offset x29 ldr x30, [x30, x29] msr vbar_el1, x30 ldr lr, [sp, #S_LR] tramp_unmap_kernel x29 .if \regsize == 64 mrs x29, far_el1 .endif add sp, sp, #PT_REGS_SIZE // restore sp eret sb .endm .macro generate_tramp_vector, kpti, bhb .Lvector_start\@: .space 0x400 .rept 4 tramp_ventry .Lvector_start\@, 64, \kpti, \bhb .endr .rept 4 tramp_ventry .Lvector_start\@, 32, \kpti, \bhb .endr .endm #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 /* * Exception vectors trampoline. * The order must match __bp_harden_el1_vectors and the * arm64_bp_harden_el1_vectors enum. 
*/ .pushsection ".entry.tramp.text", "ax" .align 11 SYM_CODE_START_NOALIGN(tramp_vectors) #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE SYM_CODE_END(tramp_vectors) SYM_CODE_START(tramp_exit_native) tramp_exit SYM_CODE_END(tramp_exit_native) SYM_CODE_START(tramp_exit_compat) tramp_exit 32 SYM_CODE_END(tramp_exit_compat) .popsection // .entry.tramp.text #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ /* * Exception vectors for spectre mitigations on entry from EL1 when * kpti is not in use. */ .macro generate_el1_vector, bhb .Lvector_start\@: kernel_ventry 1, t, 64, sync // Synchronous EL1t kernel_ventry 1, t, 64, irq // IRQ EL1t kernel_ventry 1, t, 64, fiq // FIQ EL1h kernel_ventry 1, t, 64, error // Error EL1t kernel_ventry 1, h, 64, sync // Synchronous EL1h kernel_ventry 1, h, 64, irq // IRQ EL1h kernel_ventry 1, h, 64, fiq // FIQ EL1h kernel_ventry 1, h, 64, error // Error EL1h .rept 4 tramp_ventry .Lvector_start\@, 64, 0, \bhb .endr .rept 4 tramp_ventry .Lvector_start\@, 32, 0, \bhb .endr .endm /* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */ .pushsection ".entry.text", "ax" .align 11 SYM_CODE_START(__bp_harden_el1_vectors) #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY generate_el1_vector bhb=BHB_MITIGATION_LOOP generate_el1_vector bhb=BHB_MITIGATION_FW generate_el1_vector bhb=BHB_MITIGATION_INSN #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ SYM_CODE_END(__bp_harden_el1_vectors) .popsection /* * Register switch for AArch64. The callee-saved registers need to be saved * and restored. On entry: * x0 = previous task_struct (must be preserved across the switch) * x1 = next task_struct * Previous and next are guaranteed not to be the same. * */ SYM_FUNC_START(cpu_switch_to) mov x10, #THREAD_CPU_CONTEXT add x8, x0, x10 mov x9, sp stp x19, x20, [x8], #16 // store callee-saved registers stp x21, x22, [x8], #16 stp x23, x24, [x8], #16 stp x25, x26, [x8], #16 stp x27, x28, [x8], #16 stp x29, x9, [x8], #16 str lr, [x8] add x8, x1, x10 ldp x19, x20, [x8], #16 // restore callee-saved registers ldp x21, x22, [x8], #16 ldp x23, x24, [x8], #16 ldp x25, x26, [x8], #16 ldp x27, x28, [x8], #16 ldp x29, x9, [x8], #16 ldr lr, [x8] mov sp, x9 msr sp_el0, x1 ptrauth_keys_install_kernel x1, x8, x9, x10 scs_save x0 scs_load_current ret SYM_FUNC_END(cpu_switch_to) NOKPROBE(cpu_switch_to) /* * This is how we return from a fork. */ SYM_CODE_START(ret_from_fork) bl schedule_tail cbz x19, 1f // not a kernel thread mov x0, x20 blr x19 1: get_current_task tsk mov x0, sp bl asm_exit_to_user_mode b ret_to_user SYM_CODE_END(ret_from_fork) NOKPROBE(ret_from_fork) /* * void call_on_irq_stack(struct pt_regs *regs, * void (*func)(struct pt_regs *)); * * Calls func(regs) using this CPU's irq stack and shadow irq stack. */ SYM_FUNC_START(call_on_irq_stack) #ifdef CONFIG_SHADOW_CALL_STACK get_current_task x16 scs_save x16 ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17 #endif /* Create a frame record to save our LR and SP (implicit in FP) */ stp x29, x30, [sp, #-16]! mov x29, sp ldr_this_cpu x16, irq_stack_ptr, x17 /* Move to the new stack and call the function there */ add sp, x16, #IRQ_STACK_SIZE blr x1 /* * Restore the SP from the FP, and restore the FP and LR from the frame * record. 
*/ mov sp, x29 ldp x29, x30, [sp], #16 scs_load_current ret SYM_FUNC_END(call_on_irq_stack) NOKPROBE(call_on_irq_stack) #ifdef CONFIG_ARM_SDE_INTERFACE #include <asm/sdei.h> #include <uapi/linux/arm_sdei.h> .macro sdei_handler_exit exit_mode /* On success, this call never returns... */ cmp \exit_mode, #SDEI_EXIT_SMC b.ne 99f smc #0 b . 99: hvc #0 b . .endm #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 /* * The regular SDEI entry point may have been unmapped along with the rest of * the kernel. This trampoline restores the kernel mapping to make the x1 memory * argument accessible. * * This clobbers x4, __sdei_handler() will restore this from firmware's * copy. */ .pushsection ".entry.tramp.text", "ax" SYM_CODE_START(__sdei_asm_entry_trampoline) mrs x4, ttbr1_el1 tbz x4, #USER_ASID_BIT, 1f tramp_map_kernel tmp=x4 isb mov x4, xzr /* * Remember whether to unmap the kernel on exit. */ 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)] tramp_data_read_var x4, __sdei_asm_handler br x4 SYM_CODE_END(__sdei_asm_entry_trampoline) NOKPROBE(__sdei_asm_entry_trampoline) /* * Make the exit call and restore the original ttbr1_el1 * * x0 & x1: setup for the exit API call * x2: exit_mode * x4: struct sdei_registered_event argument from registration time. */ SYM_CODE_START(__sdei_asm_exit_trampoline) ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)] cbnz x4, 1f tramp_unmap_kernel tmp=x4 1: sdei_handler_exit exit_mode=x2 SYM_CODE_END(__sdei_asm_exit_trampoline) NOKPROBE(__sdei_asm_exit_trampoline) .popsection // .entry.tramp.text #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ /* * Software Delegated Exception entry point. * * x0: Event number * x1: struct sdei_registered_event argument from registration time. * x2: interrupted PC * x3: interrupted PSTATE * x4: maybe clobbered by the trampoline * * Firmware has preserved x0->x17 for us, we must save/restore the rest to * follow SMC-CC. We save (or retrieve) all the registers as the handler may * want them. */ SYM_CODE_START(__sdei_asm_handler) stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC] stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2] stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3] stp x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4] stp x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5] stp x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6] stp x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7] stp x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8] stp x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9] stp x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10] stp x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11] stp x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12] stp x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13] stp x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14] mov x4, sp stp lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR] mov x19, x1 /* Store the registered-event for crash_smp_send_stop() */ ldrb w4, [x19, #SDEI_EVENT_PRIORITY] cbnz w4, 1f adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6 b 2f 1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6 2: str x19, [x5] #ifdef CONFIG_VMAP_STACK /* * entry.S may have been using sp as a scratch register, find whether * this is a normal or critical event and switch to the appropriate * stack for this CPU. 
*/ cbnz w4, 1f ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6 b 2f 1: ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6 2: mov x6, #SDEI_STACK_SIZE add x5, x5, x6 mov sp, x5 #endif #ifdef CONFIG_SHADOW_CALL_STACK /* Use a separate shadow call stack for normal and critical events */ cbnz w4, 3f ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6 b 4f 3: ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6 4: #endif /* * We may have interrupted userspace, or a guest, or exit-from or * return-to either of these. We can't trust sp_el0, restore it. */ mrs x28, sp_el0 ldr_this_cpu dst=x0, sym=__entry_task, tmp=x1 msr sp_el0, x0 /* If we interrupted the kernel point to the previous stack/frame. */ and x0, x3, #0xc mrs x1, CurrentEL cmp x0, x1 csel x29, x29, xzr, eq // fp, or zero csel x4, x2, xzr, eq // elr, or zero stp x29, x4, [sp, #-16]! mov x29, sp add x0, x19, #SDEI_EVENT_INTREGS mov x1, x19 bl __sdei_handler msr sp_el0, x28 /* restore regs >x17 that we clobbered */ mov x4, x19 // keep x4 for __sdei_asm_exit_trampoline ldp x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14] ldp x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9] ldp lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR] mov sp, x1 mov x1, x0 // address to complete_and_resume /* x0 = (x0 <= SDEI_EV_FAILED) ? * EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */ cmp x0, #SDEI_EV_FAILED mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME csel x0, x2, x3, ls ldr_l x2, sdei_exit_mode /* Clear the registered-event seen by crash_smp_send_stop() */ ldrb w3, [x4, #SDEI_EVENT_PRIORITY] cbnz w3, 1f adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6 b 2f 1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6 2: str xzr, [x5] alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 sdei_handler_exit exit_mode=x2 alternative_else_nop_endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3 br x5 #endif SYM_CODE_END(__sdei_asm_handler) NOKPROBE(__sdei_asm_handler) SYM_CODE_START(__sdei_handler_abort) mov_q x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME adr x1, 1f ldr_l x2, sdei_exit_mode sdei_handler_exit exit_mode=x2 // exit the handler and jump to the next instruction. // Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx. 1: ret SYM_CODE_END(__sdei_handler_abort) NOKPROBE(__sdei_handler_abort) #endif /* CONFIG_ARM_SDE_INTERFACE */
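Two small idioms in entry.S above reward a closer look in C. The tramp_data_read_var comment notes that statically allocated kernel addresses carry all-ones in bits 63:48, so sign-extending bit 47 reconstructs the top 16 bits without a fourth mov instruction; and __sdei_asm_handler picks its completion call with a cmp/csel pair. A minimal C sketch of both follows; the two SDEI function-ID constants are illustrative placeholders, not the real SMCCC values.

#include <stdint.h>

/* Rebuild a kernel VA from its low 48 bits by sign-extending bit 47,
 * mirroring the movz/movk :abs_g2_s: sequence in tramp_data_read_var. */
static uint64_t sext_bit47(uint64_t low48)
{
	/* shift bit 47 up to bit 63, then arithmetic-shift back down */
	return (uint64_t)(((int64_t)(low48 << 16)) >> 16);
}

/* Mirror of "cmp x0, #SDEI_EV_FAILED; csel x0, x2, x3, ls": "ls" is
 * unsigned lower-or-same, so a status (<= SDEI_EV_FAILED) selects the
 * plain COMPLETE call, while anything larger is a resume address. */
#define SDEI_EV_FAILED			1
#define FN_EVENT_COMPLETE		0x1001UL /* placeholder ID */
#define FN_EVENT_COMPLETE_AND_RESUME	0x1002UL /* placeholder ID */

static unsigned long sdei_completion_fn(unsigned long handler_ret)
{
	return handler_ret <= SDEI_EV_FAILED ?
		FN_EVENT_COMPLETE : FN_EVENT_COMPLETE_AND_RESUME;
}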
aixcc-public/challenge-001-exemplar-source
2,274
arch/arm64/kernel/entry-fpsimd.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * FP/SIMD state saving and restoring * * Copyright (C) 2012 ARM Ltd. * Author: Catalin Marinas <catalin.marinas@arm.com> */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/fpsimdmacros.h> /* * Save the FP registers. * * x0 - pointer to struct fpsimd_state */ SYM_FUNC_START(fpsimd_save_state) fpsimd_save x0, 8 ret SYM_FUNC_END(fpsimd_save_state) /* * Load the FP registers. * * x0 - pointer to struct fpsimd_state */ SYM_FUNC_START(fpsimd_load_state) fpsimd_restore x0, 8 ret SYM_FUNC_END(fpsimd_load_state) #ifdef CONFIG_ARM64_SVE /* * Save the SVE state * * x0 - pointer to buffer for state * x1 - pointer to storage for FPSR * x2 - Save FFR if non-zero */ SYM_FUNC_START(sve_save_state) sve_save 0, x1, x2, 3 ret SYM_FUNC_END(sve_save_state) /* * Load the SVE state * * x0 - pointer to buffer for state * x1 - pointer to storage for FPSR * x2 - Restore FFR if non-zero */ SYM_FUNC_START(sve_load_state) sve_load 0, x1, x2, 4 ret SYM_FUNC_END(sve_load_state) SYM_FUNC_START(sve_get_vl) _sve_rdvl 0, 1 ret SYM_FUNC_END(sve_get_vl) SYM_FUNC_START(sve_set_vq) sve_load_vq x0, x1, x2 ret SYM_FUNC_END(sve_set_vq) /* * Zero all SVE registers but the first 128-bits of each vector * * VQ must already be configured by caller, any further updates of VQ * will need to ensure that the register state remains valid. * * x0 = include FFR? * x1 = VQ - 1 */ SYM_FUNC_START(sve_flush_live) cbz x1, 1f // A VQ-1 of 0 is 128 bits so no extra Z state sve_flush_z 1: sve_flush_p tbz x0, #0, 2f sve_flush_ffr 2: ret SYM_FUNC_END(sve_flush_live) #endif /* CONFIG_ARM64_SVE */ #ifdef CONFIG_ARM64_SME SYM_FUNC_START(sme_get_vl) _sme_rdsvl 0, 1 ret SYM_FUNC_END(sme_get_vl) SYM_FUNC_START(sme_set_vq) sme_load_vq x0, x1, x2 ret SYM_FUNC_END(sme_set_vq) /* * Save the SME state * * x0 - pointer to buffer for state */ SYM_FUNC_START(za_save_state) _sme_rdsvl 1, 1 // x1 = VL/8 sme_save_za 0, x1, 12 ret SYM_FUNC_END(za_save_state) /* * Load the SME state * * x0 - pointer to buffer for state */ SYM_FUNC_START(za_load_state) _sme_rdsvl 1, 1 // x1 = VL/8 sme_load_za 0, x1, 12 ret SYM_FUNC_END(za_load_state) #endif /* CONFIG_ARM64_SME */
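The VQ arguments threaded through the SVE/SME helpers above are quadword counts: one VQ is 128 bits, so VL in bytes is VQ * 16. A tiny sketch of the conversion, under the assumption (as in the kernel's own helpers) that VL is measured in bytes:

#include <assert.h>

#define SVE_VQ_BYTES 16 /* one 128-bit quadword */

/* sve_flush_live's "A VQ-1 of 0 is 128 bits" check follows from this:
 * at VQ == 1 the Z registers hold nothing beyond their 128-bit
 * V-register aliases, so there is no extra Z state to zero. */
static unsigned int vq_from_vl(unsigned int vl_bytes)
{
	assert(vl_bytes % SVE_VQ_BYTES == 0);
	return vl_bytes / SVE_VQ_BYTES;
}

static unsigned int vl_from_vq(unsigned int vq)
{
	return vq * SVE_VQ_BYTES;
}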
aixcc-public/challenge-001-exemplar-source
9,371
arch/arm64/kernel/entry-ftrace.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * arch/arm64/kernel/entry-ftrace.S * * Copyright (C) 2013 Linaro Limited * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> #include <asm/ftrace.h> #include <asm/insn.h> #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS /* * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before * the regular function prologue. For an enabled callsite, ftrace_init_nop() and * ftrace_make_call() have patched those NOPs to: * * MOV X9, LR * BL <entry> * * ... where <entry> is either ftrace_caller or ftrace_regs_caller. * * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to * clobber. * * We save the callsite's context into a pt_regs before invoking any ftrace * callbacks. So that we can get a sensible backtrace, we create a stack record * for the callsite and the ftrace entry assembly. This is not sufficient for * reliable stacktrace: until we create the callsite stack record, its caller * is missing from the LR and existing chain of frame records. */ .macro ftrace_regs_entry, allregs=0 /* Make room for pt_regs, plus a callee frame */ sub sp, sp, #(PT_REGS_SIZE + 16) /* Save function arguments (and x9 for simplicity) */ stp x0, x1, [sp, #S_X0] stp x2, x3, [sp, #S_X2] stp x4, x5, [sp, #S_X4] stp x6, x7, [sp, #S_X6] stp x8, x9, [sp, #S_X8] /* Optionally save the callee-saved registers, always save the FP */ .if \allregs == 1 stp x10, x11, [sp, #S_X10] stp x12, x13, [sp, #S_X12] stp x14, x15, [sp, #S_X14] stp x16, x17, [sp, #S_X16] stp x18, x19, [sp, #S_X18] stp x20, x21, [sp, #S_X20] stp x22, x23, [sp, #S_X22] stp x24, x25, [sp, #S_X24] stp x26, x27, [sp, #S_X26] stp x28, x29, [sp, #S_X28] .else str x29, [sp, #S_FP] .endif /* Save the callsite's SP and LR */ add x10, sp, #(PT_REGS_SIZE + 16) stp x9, x10, [sp, #S_LR] /* Save the PC after the ftrace callsite */ str x30, [sp, #S_PC] /* Create a frame record for the callsite above pt_regs */ stp x29, x9, [sp, #PT_REGS_SIZE] add x29, sp, #PT_REGS_SIZE /* Create our frame record within pt_regs. */ stp x29, x30, [sp, #S_STACKFRAME] add x29, sp, #S_STACKFRAME .endm SYM_CODE_START(ftrace_regs_caller) bti c ftrace_regs_entry 1 b ftrace_common SYM_CODE_END(ftrace_regs_caller) SYM_CODE_START(ftrace_caller) bti c ftrace_regs_entry 0 b ftrace_common SYM_CODE_END(ftrace_caller) SYM_CODE_START(ftrace_common) sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn) mov x1, x9 // parent_ip (callsite's LR) ldr_l x2, function_trace_op // op mov x3, sp // regs SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) bl ftrace_stub /* * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved * x19-x29 per the AAPCS, and we created frame records upon entry, so we need * to restore x0-x8, x29, and x30. */ /* Restore function arguments */ ldp x0, x1, [sp] ldp x2, x3, [sp, #S_X2] ldp x4, x5, [sp, #S_X4] ldp x6, x7, [sp, #S_X6] ldr x8, [sp, #S_X8] /* Restore the callsite's FP, LR, PC */ ldr x29, [sp, #S_FP] ldr x30, [sp, #S_LR] ldr x9, [sp, #S_PC] /* Restore the callsite's SP */ add sp, sp, #PT_REGS_SIZE + 16 ret x9 SYM_CODE_END(ftrace_common) #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ /* * Gcc with -pg will put the following code at the beginning of each function: * mov x0, x30 * bl _mcount * [function's body ...] * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic * ftrace is enabled.
* * Note that the x0 argument is not used here, because we can recover * the instrumented function's lr (x30) at any time by unwinding the call stack, * as long as the kernel is compiled without -fomit-frame-pointer * (i.e. with CONFIG_FRAME_POINTER, which is forced on arm64) * * stack layout after mcount_enter in _mcount(): * * current sp/fp => 0:+-----+ * in _mcount() | x29 | -> instrumented function's fp * +-----+ * | x30 | -> _mcount()'s lr (= instrumented function's pc) * old sp => +16:+-----+ * when instrumented | | * function calls | ... | * _mcount() | | * | | * instrumented => +xx:+-----+ * function's fp | x29 | -> parent's fp * +-----+ * | x30 | -> instrumented function's lr (= parent's pc) * +-----+ * | ... | */ .macro mcount_enter stp x29, x30, [sp, #-16]! mov x29, sp .endm .macro mcount_exit ldp x29, x30, [sp], #16 ret .endm .macro mcount_adjust_addr rd, rn sub \rd, \rn, #AARCH64_INSN_SIZE .endm /* for instrumented function's parent */ .macro mcount_get_parent_fp reg ldr \reg, [x29] ldr \reg, [\reg] .endm /* for instrumented function */ .macro mcount_get_pc0 reg mcount_adjust_addr \reg, x30 .endm .macro mcount_get_pc reg ldr \reg, [x29, #8] mcount_adjust_addr \reg, \reg .endm .macro mcount_get_lr reg ldr \reg, [x29] ldr \reg, [\reg, #8] .endm .macro mcount_get_lr_addr reg ldr \reg, [x29] add \reg, \reg, #8 .endm #ifndef CONFIG_DYNAMIC_FTRACE /* * void _mcount(unsigned long return_address) * @return_address: return address to instrumented function * * This function makes calls, if enabled, to: * - the tracer function, to probe the instrumented function's entry, * - ftrace_graph_caller, to set up an exit hook */ SYM_FUNC_START(_mcount) mcount_enter ldr_l x2, ftrace_trace_function adr x0, ftrace_stub cmp x0, x2 // if (ftrace_trace_function b.eq skip_ftrace_call // != ftrace_stub) { mcount_get_pc x0 // function's pc mcount_get_lr x1 // function's lr (= parent's pc) blr x2 // (*ftrace_trace_function)(pc, lr); skip_ftrace_call: // } #ifdef CONFIG_FUNCTION_GRAPH_TRACER ldr_l x2, ftrace_graph_return cmp x0, x2 // if ((ftrace_graph_return b.ne ftrace_graph_caller // != ftrace_stub) ldr_l x2, ftrace_graph_entry // || (ftrace_graph_entry adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub)) cmp x0, x2 b.ne ftrace_graph_caller // ftrace_graph_caller(); #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ mcount_exit SYM_FUNC_END(_mcount) EXPORT_SYMBOL(_mcount) NOKPROBE(_mcount) #else /* CONFIG_DYNAMIC_FTRACE */ /* * _mcount() is used to build the kernel with the -pg option, but all the branch * instructions to _mcount() are replaced with NOPs initially at kernel start-up, * and later patched, per function, from NOP to a branch to ftrace_caller() when * enabled, or back to NOP when disabled. */ SYM_FUNC_START(_mcount) ret SYM_FUNC_END(_mcount) EXPORT_SYMBOL(_mcount) NOKPROBE(_mcount) /* * void ftrace_caller(unsigned long return_address) * @return_address: return address to instrumented function * * This function is a counterpart of _mcount() in 'static' ftrace, and * makes calls to: * - the tracer function, to probe the instrumented function's entry, * - ftrace_graph_caller, to set up an exit hook */ SYM_FUNC_START(ftrace_caller) mcount_enter mcount_get_pc0 x0 // function's pc mcount_get_lr x1 // function's lr SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr); nop // This will be replaced with "bl xxx" // where xxx can be any kind of tracer.
#ifdef CONFIG_FUNCTION_GRAPH_TRACER SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller(); nop // If enabled, this will be replaced with // "b ftrace_graph_caller" #endif mcount_exit SYM_FUNC_END(ftrace_caller) #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * void ftrace_graph_caller(void) * * Called from _mcount() or ftrace_caller() when function_graph tracer is * selected. * This function, together with prepare_ftrace_return(), fakes the link * register's value on the call stack in order to intercept the instrumented * function's return path and run return_to_handler() later, on its exit. */ SYM_FUNC_START(ftrace_graph_caller) mcount_get_pc x0 // function's pc mcount_get_lr_addr x1 // pointer to function's saved lr mcount_get_parent_fp x2 // parent's fp bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp) mcount_exit SYM_FUNC_END(ftrace_graph_caller) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ SYM_TYPED_FUNC_START(ftrace_stub) ret SYM_FUNC_END(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER SYM_TYPED_FUNC_START(ftrace_stub_graph) ret SYM_FUNC_END(ftrace_stub_graph) /* * void return_to_handler(void) * * Run ftrace_return_to_handler() before going back to the parent. * @fp is checked against the value passed by ftrace_graph_caller(). */ SYM_CODE_START(return_to_handler) /* save return value regs */ sub sp, sp, #64 stp x0, x1, [sp] stp x2, x3, [sp, #16] stp x4, x5, [sp, #32] stp x6, x7, [sp, #48] mov x0, x29 // parent's fp bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp); mov x30, x0 // restore the original return address /* restore return value regs */ ldp x0, x1, [sp] ldp x2, x3, [sp, #16] ldp x4, x5, [sp, #32] ldp x6, x7, [sp, #48] add sp, sp, #64 ret SYM_CODE_END(return_to_handler) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
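The graph-tracer plumbing above hinges on one trick: ftrace_graph_caller passes the address of the saved lr so that prepare_ftrace_return() can overwrite it with return_to_handler, hijacking the function's return. A C-level sketch of that hijack, with a hypothetical push_return_trace() helper and a stub routine standing in for the kernel's bookkeeping and the real assembly handler:

#include <stdint.h>
#include <stdbool.h>

static void return_to_handler_stub(void) { /* stands in for the asm routine */ }

/* hypothetical stand-in for the kernel's per-task return-stack push */
static bool push_return_trace(uintptr_t fp, uintptr_t real_ret)
{
	(void)fp; (void)real_ret;
	return true;
}

/* Sketch of prepare_ftrace_return(pc, &lr, fp): save the real return
 * address, then redirect the saved lr so the traced function "returns"
 * into return_to_handler. */
static void prepare_ftrace_return_sketch(uintptr_t self_pc,
					 uintptr_t *parent_lr, uintptr_t fp)
{
	uintptr_t real_ret = *parent_lr;

	(void)self_pc; /* the real code records this in the trace */
	if (push_return_trace(fp, real_ret))
		*parent_lr = (uintptr_t)return_to_handler_stub; /* hijack */
}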
aixcc-public/challenge-001-exemplar-source
1,348
arch/arm64/kernel/sigreturn32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * AArch32 sigreturn code. * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. * * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net> * Copyright (C) 2012-2018 ARM Ltd. * * For ARM syscalls, the syscall number has to be loaded into r7. * We do not support an OABI userspace. * * For Thumb syscalls, we also pass the syscall number via r7. We therefore * need two 16-bit instructions. */ #include <asm/unistd.h> .section .rodata .globl __aarch32_sigret_code_start __aarch32_sigret_code_start: /* * ARM code */ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn /* * Thumb code */ .byte __NR_compat_sigreturn, 0x27 // mov r7, #__NR_compat_sigreturn .byte __NR_compat_sigreturn, 0xdf // svc #__NR_compat_sigreturn /* * ARM code */ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn /* * Thumb code */ .byte __NR_compat_rt_sigreturn, 0x27 // mov r7, #__NR_compat_rt_sigreturn .byte __NR_compat_rt_sigreturn, 0xdf // svc #__NR_compat_rt_sigreturn .globl __aarch32_sigret_code_end __aarch32_sigret_code_end:
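The byte ordering in the Thumb stanzas above is easy to misread: Thumb instructions are 16-bit little-endian, so the immediate byte comes first in memory and the opcode byte (0x27 for movs r7, 0xdf for svc) second. A small host-side sketch that prints the memory image of both instructions, with a stand-in syscall number:

#include <stdint.h>
#include <stdio.h>

static uint16_t thumb_movs_r7(uint8_t imm) { return 0x2700 | imm; }
static uint16_t thumb_svc(uint8_t imm)     { return 0xdf00 | imm; }

int main(void)
{
	uint8_t nr = 0x77;	/* stand-in syscall number */
	uint16_t movs = thumb_movs_r7(nr), svc = thumb_svc(nr);

	/* little-endian: low (immediate) byte first, opcode byte second */
	printf("movs r7: %02x %02x\n", (unsigned)(movs & 0xff), (unsigned)(movs >> 8));
	printf("svc:     %02x %02x\n", (unsigned)(svc & 0xff), (unsigned)(svc >> 8));
	return 0;
}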
aixcc-public/challenge-001-exemplar-source
3,094
arch/arm64/kernel/relocate_kernel.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * kexec for arm64 * * Copyright (C) Linaro. * Copyright (C) Huawei Futurewei Technologies. * Copyright (C) 2021, Microsoft Corporation. * Pasha Tatashin <pasha.tatashin@soleen.com> */ #include <linux/kexec.h> #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/kexec.h> #include <asm/page.h> #include <asm/sysreg.h> #include <asm/virt.h> .macro turn_off_mmu tmp1, tmp2 mov_q \tmp1, INIT_SCTLR_EL1_MMU_OFF pre_disable_mmu_workaround msr sctlr_el1, \tmp1 isb .endm .section ".kexec_relocate.text", "ax" /* * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it. * * The memory that the old kernel occupies may be overwritten when copying the * new image to its final location. To assure that the * arm64_relocate_new_kernel routine which does that copy is not overwritten, * all code and data needed by arm64_relocate_new_kernel must be between the * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec * safe memory that has been set up to be preserved during the copy operation. */ SYM_CODE_START(arm64_relocate_new_kernel) /* * The kimage structure isn't allocated specially and may be clobbered * during relocation. We must load any values we need from it prior to * any relocation occurring. */ ldr x28, [x0, #KIMAGE_START] ldr x27, [x0, #KIMAGE_ARCH_EL2_VECTORS] ldr x26, [x0, #KIMAGE_ARCH_DTB_MEM] /* Setup the list loop variables. */ ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */ ldr x17, [x0, #KIMAGE_ARCH_TTBR1] /* x17 = linear map copy */ ldr x16, [x0, #KIMAGE_HEAD] /* x16 = kimage_head */ ldr x22, [x0, #KIMAGE_ARCH_PHYS_OFFSET] /* x22 phys_offset */ raw_dcache_line_size x15, x1 /* x15 = dcache line size */ break_before_make_ttbr_switch x18, x17, x1, x2 /* set linear map */ .Lloop: and x12, x16, PAGE_MASK /* x12 = addr */ sub x12, x12, x22 /* Convert x12 to virt */ /* Test the entry flags. */ .Ltest_source: tbz x16, IND_SOURCE_BIT, .Ltest_indirection /* Invalidate dest page to PoC. */ mov x19, x13 copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8 add x1, x19, #PAGE_SIZE dcache_by_myline_op civac, sy, x19, x1, x15, x20 b .Lnext .Ltest_indirection: tbz x16, IND_INDIRECTION_BIT, .Ltest_destination mov x14, x12 /* ptr = addr */ b .Lnext .Ltest_destination: tbz x16, IND_DESTINATION_BIT, .Lnext mov x13, x12 /* dest = addr */ .Lnext: ldr x16, [x14], #8 /* entry = *ptr++ */ tbz x16, IND_DONE_BIT, .Lloop /* while (!(entry & DONE)) */ /* wait for writes from copy_page to finish */ dsb nsh ic iallu dsb nsh isb turn_off_mmu x12, x13 /* Start new image. */ cbz x27, .Lel1 mov x1, x28 /* kernel entry point */ mov x2, x26 /* dtb address */ mov x3, xzr mov x4, xzr mov x0, #HVC_SOFT_RESTART hvc #0 /* Jumps from el2 */ .Lel1: mov x0, x26 /* dtb address */ mov x1, xzr mov x2, xzr mov x3, xzr br x28 /* Jumps from el1 */ SYM_CODE_END(arm64_relocate_new_kernel)
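The .Lloop/.Ltest_* chain above is the arm64 rendition of kexec's generic kimage list walk. In C, with the flag bits as in include/linux/kexec.h and a 4 KiB page size assumed for illustration:

#include <stdint.h>
#include <string.h>

#define IND_DESTINATION	(1 << 0)
#define IND_INDIRECTION	(1 << 1)
#define IND_DONE	(1 << 2)
#define IND_SOURCE	(1 << 3)

#define KPAGE_SIZE	4096UL		/* assumed 4K pages */
#define KPAGE_MASK	(~(KPAGE_SIZE - 1))

static void relocate_sketch(uint64_t *head)
{
	uint64_t *ptr = head;
	uint64_t entry, dest = 0;

	for (entry = *ptr++; !(entry & IND_DONE); entry = *ptr++) {
		void *addr = (void *)(uintptr_t)(entry & KPAGE_MASK);

		if (entry & IND_SOURCE) {
			memcpy((void *)(uintptr_t)dest, addr, KPAGE_SIZE);
			dest += KPAGE_SIZE;	/* copy_page advances dest */
		} else if (entry & IND_INDIRECTION) {
			ptr = addr;		/* switch to a new entry page */
		} else if (entry & IND_DESTINATION) {
			dest = entry & KPAGE_MASK;
		}
	}
}

The assembly additionally converts each physical address to a linear-map virtual address (the x22 subtraction) and cleans each destination page to PoC, which this sketch omits.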
aixcc-public/challenge-001-exemplar-source
1,889
arch/arm64/kernel/kuser32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * AArch32 user helpers. * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. * * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net> * Copyright (C) 2012-2018 ARM Ltd. * * The kuser helpers below are mapped at a fixed address by * aarch32_setup_additional_pages() and are provided for compatibility * reasons with 32 bit (aarch32) applications that need them. * * See Documentation/arm/kernel_user_helpers.rst for formal definitions. */ #include <asm/unistd.h> .section .rodata .align 5 .globl __kuser_helper_start __kuser_helper_start: __kuser_cmpxchg64: // 0xffff0f60 .inst 0xe92d00f0 // push {r4, r5, r6, r7} .inst 0xe1c040d0 // ldrd r4, r5, [r0] .inst 0xe1c160d0 // ldrd r6, r7, [r1] .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2] .inst 0xe0303004 // eors r3, r0, r4 .inst 0x00313005 // eoreqs r3, r1, r5 .inst 0x01a23e96 // stlexdeq r3, r6, [r2] .inst 0x03330001 // teqeq r3, #1 .inst 0x0afffff9 // beq 1b .inst 0xf57ff05b // dmb ish .inst 0xe2730000 // rsbs r0, r3, #0 .inst 0xe8bd00f0 // pop {r4, r5, r6, r7} .inst 0xe12fff1e // bx lr .align 5 __kuser_memory_barrier: // 0xffff0fa0 .inst 0xf57ff05b // dmb ish .inst 0xe12fff1e // bx lr .align 5 __kuser_cmpxchg: // 0xffff0fc0 .inst 0xe1923f9f // 1: ldrex r3, [r2] .inst 0xe0533000 // subs r3, r3, r0 .inst 0x01823e91 // stlexeq r3, r1, [r2] .inst 0x03330001 // teqeq r3, #1 .inst 0x0afffffa // beq 1b .inst 0xf57ff05b // dmb ish .inst 0xe2730000 // rsbs r0, r3, #0 .inst 0xe12fff1e // bx lr .align 5 __kuser_get_tls: // 0xffff0fe0 .inst 0xee1d0f70 // mrc p15, 0, r0, c13, c0, 3 .inst 0xe12fff1e // bx lr .rep 5 .word 0 .endr __kuser_helper_version: // 0xffff0ffc .word ((__kuser_helper_end - __kuser_helper_start) >> 5) .globl __kuser_helper_end __kuser_helper_end:
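From a 32-bit process these helpers are called by absolute address (the trailing // 0xffff0fNN comments give the fixed mapping). Per Documentation/arm/kernel_user_helpers.rst, __kuser_cmpxchg returns zero on success and non-zero on failure, which makes retry loops natural. A sketch, valid only when compiled as AArch32 user code:

/* Must be built as 32-bit ARM user code; not callable from AArch64. */
typedef int (*kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define kuser_cmpxchg ((kuser_cmpxchg_t)0xffff0fc0)

static int atomic_add_return_sketch(int amount, volatile int *v)
{
	int old;

	do {
		old = *v;
	} while (kuser_cmpxchg(old, old + amount, v)); /* nonzero = retry */

	return old + amount;
}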
aixcc-public/challenge-001-exemplar-source
1,393
arch/arm64/kernel/cpu-reset.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * CPU reset routines * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2012 ARM Ltd. * Copyright (C) 2015 Huawei Futurewei Technologies. */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/assembler.h> #include <asm/sysreg.h> #include <asm/virt.h> .text .pushsection .idmap.text, "awx" /* * cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) * * @el2_switch: Flag to indicate a switch to EL2 is needed. * @entry: Location to jump to for soft reset. * @arg0: First argument passed to @entry (relocation list). * @arg1: Second argument passed to @entry (physical kernel entry). * @arg2: Third argument passed to @entry (physical dtb address). * * Put the CPU into the same state as it would be if it had been reset, and * branch to what would be the reset vector. It must be executed with the * flat identity mapping. */ SYM_TYPED_FUNC_START(cpu_soft_restart) mov_q x12, INIT_SCTLR_EL1_MMU_OFF pre_disable_mmu_workaround /* * either disable EL1&0 translation regime or disable EL2&0 translation * regime if HCR_EL2.E2H == 1 */ msr sctlr_el1, x12 isb cbz x0, 1f // el2_switch? mov x0, #HVC_SOFT_RESTART hvc #0 // no return 1: mov x8, x1 // entry mov x0, x2 // arg0 mov x1, x3 // arg1 mov x2, x4 // arg2 br x8 SYM_FUNC_END(cpu_soft_restart) .popsection
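Seen from C, the register moves above implement a simple contract: once the MMU is off (or after the HVC_SOFT_RESTART round-trip through EL2), @entry is invoked with arg0..arg2 in x0..x2. A sketch of that final hand-off, with an illustrative function-pointer type:

typedef void (*soft_restart_entry_t)(unsigned long arg0,
				     unsigned long arg1,
				     unsigned long arg2);

static void __attribute__((noreturn))
soft_restart_tail_sketch(soft_restart_entry_t entry, unsigned long arg0,
			 unsigned long arg1, unsigned long arg2)
{
	/* MMU and caches are assumed off here; this mirrors the final
	 * "mov x0..x2; br x8" sequence above. */
	entry(arg0, arg1, arg2);
	__builtin_unreachable();
}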
aixcc-public/challenge-001-exemplar-source
2,092
arch/arm64/kernel/efi-rt-wrapper.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> */ #include <linux/linkage.h> #include <asm/assembler.h> SYM_FUNC_START(__efi_rt_asm_wrapper) stp x29, x30, [sp, #-112]! mov x29, sp /* * Register x18 is designated as the 'platform' register by the AAPCS, * which means firmware running at the same exception level as the OS * (such as UEFI) should never touch it. */ stp x1, x18, [sp, #16] /* * Preserve all callee saved registers and preserve the stack pointer * value at the base of the EFI runtime stack so we can recover from * synchronous exceptions occurring while executing the firmware * routines. */ stp x19, x20, [sp, #32] stp x21, x22, [sp, #48] stp x23, x24, [sp, #64] stp x25, x26, [sp, #80] stp x27, x28, [sp, #96] ldr_l x16, efi_rt_stack_top mov sp, x16 stp x18, x29, [sp, #-16]! /* * We are lucky enough that no EFI runtime services take more than * 5 arguments, so all are passed in registers rather than via the * stack. */ mov x8, x0 mov x0, x2 mov x1, x3 mov x2, x4 mov x3, x5 mov x4, x6 blr x8 mov x16, sp mov sp, x29 str xzr, [x16, #8] // clear recorded task SP value ldp x1, x2, [sp, #16] cmp x2, x18 ldp x29, x30, [sp], #112 b.ne 0f ret 0: /* * With CONFIG_SHADOW_CALL_STACK, the kernel uses x18 to store a * shadow stack pointer, which we need to restore before returning to * potentially instrumented code. This is safe because the wrapper is * called with preemption disabled and a separate shadow stack is used * for interrupts. */ #ifdef CONFIG_SHADOW_CALL_STACK ldr_l x18, efi_rt_stack_top ldr x18, [x18, #-16] #endif b efi_handle_corrupted_x18 // tail call SYM_FUNC_END(__efi_rt_asm_wrapper) SYM_CODE_START(__efi_rt_asm_recover) mov sp, x30 ldr_l x16, efi_rt_stack_top // clear recorded task SP value str xzr, [x16, #-8] ldp x19, x20, [sp, #32] ldp x21, x22, [sp, #48] ldp x23, x24, [sp, #64] ldp x25, x26, [sp, #80] ldp x27, x28, [sp, #96] ldp x29, x30, [sp], #112 ret SYM_CODE_END(__efi_rt_asm_recover)
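The first half of __efi_rt_asm_wrapper is just an argument shuffle: the callee arrives in x0 and its (at most five) EFI arguments in x2..x6, so the wrapper moves the callee into x8 and the arguments down into x0..x4 before the blr. In C terms, with illustrative types:

typedef unsigned long efi_status_like_t;
typedef efi_status_like_t (*efi_rt_fn_t)(unsigned long, unsigned long,
					 unsigned long, unsigned long,
					 unsigned long);

/* x0 = fn, x1 = name (kept only for the x18-corruption diagnostic),
 * x2..x6 = the EFI call's own arguments. */
static efi_status_like_t wrapper_shuffle_sketch(efi_rt_fn_t fn,
						const char *name,
						unsigned long a1,
						unsigned long a2,
						unsigned long a3,
						unsigned long a4,
						unsigned long a5)
{
	(void)name;
	return fn(a1, a2, a3, a4, a5);
}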
aixcc-public/challenge-001-exemplar-source
3,864
arch/arm64/kernel/smccc-call.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015, Linaro Limited */ #include <linux/linkage.h> #include <linux/arm-smccc.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> #include <asm/thread_info.h> /* * If we have SMCCC v1.3 and (as is likely) no SVE state in * the registers then set the SMCCC hint bit to say there's no * need to preserve it. Do this by directly adjusting the SMCCC * function value which is already stored in x0 ready to be called. */ SYM_FUNC_START(__arm_smccc_sve_check) ldr_l x16, smccc_has_sve_hint cbz x16, 2f get_current_task x16 ldr x16, [x16, #TSK_TI_FLAGS] tbnz x16, #TIF_FOREIGN_FPSTATE, 1f // Any live FP state? tbnz x16, #TIF_SVE, 2f // Does that state include SVE? 1: orr x0, x0, ARM_SMCCC_1_3_SVE_HINT 2: ret SYM_FUNC_END(__arm_smccc_sve_check) EXPORT_SYMBOL(__arm_smccc_sve_check) .macro SMCCC instr stp x29, x30, [sp, #-16]! mov x29, sp alternative_if ARM64_SVE bl __arm_smccc_sve_check alternative_else_nop_endif \instr #0 ldr x4, [sp, #16] stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS] ldr x4, [sp, #24] cbz x4, 1f /* no quirk structure */ ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS] cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6 b.ne 1f str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS] 1: ldp x29, x30, [sp], #16 ret .endm /* * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, * unsigned long a3, unsigned long a4, unsigned long a5, * unsigned long a6, unsigned long a7, struct arm_smccc_res *res, * struct arm_smccc_quirk *quirk) */ SYM_FUNC_START(__arm_smccc_smc) SMCCC smc SYM_FUNC_END(__arm_smccc_smc) EXPORT_SYMBOL(__arm_smccc_smc) /* * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, * unsigned long a3, unsigned long a4, unsigned long a5, * unsigned long a6, unsigned long a7, struct arm_smccc_res *res, * struct arm_smccc_quirk *quirk) */ SYM_FUNC_START(__arm_smccc_hvc) SMCCC hvc SYM_FUNC_END(__arm_smccc_hvc) EXPORT_SYMBOL(__arm_smccc_hvc) .macro SMCCC_1_2 instr /* Save `res` and free a GPR that won't be clobbered */ stp x1, x19, [sp, #-16]! 
/* Ensure `args` won't be clobbered while loading regs in next step */ mov x19, x0 /* Load the registers x0 - x17 from the struct arm_smccc_1_2_regs */ ldp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS] ldp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS] ldp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS] ldp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS] ldp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS] ldp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS] ldp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS] ldp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS] ldp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS] \instr #0 /* Load the `res` from the stack */ ldr x19, [sp] /* Store the registers x0 - x17 into the result structure */ stp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS] stp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS] stp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS] stp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS] stp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS] stp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS] stp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS] stp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS] stp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS] /* Restore original x19 */ ldp xzr, x19, [sp], #16 ret .endm /* * void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args, * struct arm_smccc_1_2_regs *res); */ SYM_FUNC_START(arm_smccc_1_2_hvc) SMCCC_1_2 hvc SYM_FUNC_END(arm_smccc_1_2_hvc) EXPORT_SYMBOL(arm_smccc_1_2_hvc) /* * void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args, * struct arm_smccc_1_2_regs *res); */ SYM_FUNC_START(arm_smccc_1_2_smc) SMCCC_1_2 smc SYM_FUNC_END(arm_smccc_1_2_smc) EXPORT_SYMBOL(arm_smccc_1_2_smc)
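For completeness, the usual C-side view of these wrappers, assuming the standard declarations in <linux/arm-smccc.h>: the function ID goes in x0, and results come back in a struct arm_smccc_res. A minimal kernel-context sketch querying the SMCCC version:

#include <linux/arm-smccc.h>

static unsigned long smccc_version_sketch(void)
{
	struct arm_smccc_res res;

	/* dispatches to __arm_smccc_smc()/__arm_smccc_hvc() above,
	 * depending on the detected conduit */
	arm_smccc_1_1_invoke(ARM_SMCCC_VERSION_FUNC_ID, &res);
	return res.a0;	/* e.g. 0x10001 for SMCCC v1.1 */
}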
aixcc-public/challenge-001-exemplar-source
1,510
arch/arm64/kernel/efi-entry.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * EFI entry point. * * Copyright (C) 2013, 2014 Red Hat, Inc. * Author: Mark Salter <msalter@redhat.com> */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> __INIT SYM_CODE_START(efi_enter_kernel) /* * efi_pe_entry() will have copied the kernel image if necessary and we * end up here with device tree address in x1 and the kernel entry * point stored in x0. Save those values in registers which are * callee preserved. */ ldr w2, =primary_entry_offset add x19, x0, x2 // relocated Image entrypoint mov x20, x1 // DTB address /* * Clean the copied Image to the PoC, and ensure it is not shadowed by * stale icache entries from before relocation. */ ldr w1, =kernel_size add x1, x0, x1 bl dcache_clean_poc ic ialluis /* * Clean the remainder of this routine to the PoC * so that we can safely disable the MMU and caches. */ adr x0, 0f adr x1, 3f bl dcache_clean_poc 0: /* Turn off Dcache and MMU */ mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.ne 1f mrs x0, sctlr_el2 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C pre_disable_mmu_workaround msr sctlr_el2, x0 isb b 2f 1: mrs x0, sctlr_el1 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C pre_disable_mmu_workaround msr sctlr_el1, x0 isb 2: /* Jump to kernel entry point */ mov x0, x20 mov x1, xzr mov x2, xzr mov x3, xzr br x19 3: SYM_CODE_END(efi_enter_kernel)
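The two bit-clears above are the whole of "turn off Dcache and MMU": SCTLR_ELx.M (bit 0) enables the MMU and SCTLR_ELx.C (bit 2) enables data caching. The same computation in C:

#define SCTLR_ELx_M	(1UL << 0)	/* MMU enable */
#define SCTLR_ELx_C	(1UL << 2)	/* data/unified cache enable */

static unsigned long sctlr_mmu_cache_off(unsigned long sctlr)
{
	return sctlr & ~(SCTLR_ELx_M | SCTLR_ELx_C);
}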
aixcc-public/challenge-001-exemplar-source
1,582
arch/arm64/kernel/reloc_test_syms.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org> */ #include <linux/linkage.h> SYM_FUNC_START(absolute_data64) ldr x0, 0f ret 0: .quad sym64_abs SYM_FUNC_END(absolute_data64) SYM_FUNC_START(absolute_data32) ldr w0, 0f ret 0: .long sym32_abs SYM_FUNC_END(absolute_data32) SYM_FUNC_START(absolute_data16) adr x0, 0f ldrh w0, [x0] ret 0: .short sym16_abs, 0 SYM_FUNC_END(absolute_data16) SYM_FUNC_START(signed_movw) movz x0, #:abs_g2_s:sym64_abs movk x0, #:abs_g1_nc:sym64_abs movk x0, #:abs_g0_nc:sym64_abs ret SYM_FUNC_END(signed_movw) SYM_FUNC_START(unsigned_movw) movz x0, #:abs_g3:sym64_abs movk x0, #:abs_g2_nc:sym64_abs movk x0, #:abs_g1_nc:sym64_abs movk x0, #:abs_g0_nc:sym64_abs ret SYM_FUNC_END(unsigned_movw) .align 12 .space 0xff8 SYM_FUNC_START(relative_adrp) adrp x0, sym64_rel add x0, x0, #:lo12:sym64_rel ret SYM_FUNC_END(relative_adrp) .align 12 .space 0xffc SYM_FUNC_START(relative_adrp_far) adrp x0, memstart_addr add x0, x0, #:lo12:memstart_addr ret SYM_FUNC_END(relative_adrp_far) SYM_FUNC_START(relative_adr) adr x0, sym64_rel ret SYM_FUNC_END(relative_adr) SYM_FUNC_START(relative_data64) adr x1, 0f ldr x0, [x1] add x0, x0, x1 ret 0: .quad sym64_rel - . SYM_FUNC_END(relative_data64) SYM_FUNC_START(relative_data32) adr x1, 0f ldr w0, [x1] add x0, x0, x1 ret 0: .long sym64_rel - . SYM_FUNC_END(relative_data32) SYM_FUNC_START(relative_data16) adr x1, 0f ldrsh w0, [x1] add x0, x0, x1 ret 0: .short sym64_rel - ., 0 SYM_FUNC_END(relative_data16)
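signed_movw and unsigned_movw above build a 64-bit constant 16 bits at a time via movz/movk group relocations (:abs_g3: selects bits 63:48, down to :abs_g0: for bits 15:0). The arithmetic those relocations perform, sketched in C:

#include <stdint.h>

static uint64_t movz_movk_sketch(uint64_t sym)
{
	uint16_t g3 = sym >> 48, g2 = sym >> 32, g1 = sym >> 16, g0 = sym;
	uint64_t v;

	v  = (uint64_t)g3 << 48;	/* movz x0, #:abs_g3:sym */
	v |= (uint64_t)g2 << 32;	/* movk x0, #:abs_g2_nc:sym */
	v |= (uint64_t)g1 << 16;	/* movk x0, #:abs_g1_nc:sym */
	v |= g0;			/* movk x0, #:abs_g0_nc:sym */
	return v;
}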
aixcc-public/challenge-001-exemplar-source
2,904
arch/arm64/kernel/hibernate-asm.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Hibernate low-level support * * Copyright (C) 2016 ARM Ltd. * Author: James Morse <james.morse@arm.com> */ #include <linux/linkage.h> #include <linux/errno.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> #include <asm/cputype.h> #include <asm/memory.h> #include <asm/page.h> #include <asm/virt.h> /* * Resume from hibernate * * Loads temporary page tables then restores the memory image. * Finally branches to cpu_resume() to restore the state saved by * swsusp_arch_suspend(). * * Because this code has to be copied to a 'safe' page, it can't call out to * other functions by PC-relative address. Also remember that it may be * mid-way through over-writing other functions. For this reason it contains * code from caches_clean_inval_pou() and uses the copy_page() macro. * * This 'safe' page is mapped via ttbr0, and executed from there. This function * switches to a copy of the linear map in ttbr1, performs the restore, then * switches ttbr1 to the original kernel's swapper_pg_dir. * * All of memory gets written to, including code. We need to clean the kernel * text to the Point of Coherence (PoC) before secondary cores can be booted. * Because the kernel modules and executable pages mapped to user space are * also written as data, we clean all pages we touch to the Point of * Unification (PoU). * * x0: physical address of temporary page tables * x1: physical address of swapper page tables * x2: address of cpu_resume * x3: linear map address of restore_pblist in the current kernel * x4: physical address of __hyp_stub_vectors, or 0 * x5: physical address of a zero page that remains zero after resume */ .pushsection ".hibernate_exit.text", "ax" SYM_CODE_START(swsusp_arch_suspend_exit) /* * We execute from ttbr0, change ttbr1 to our copied linear map tables * with a break-before-make via the zero page */ break_before_make_ttbr_switch x5, x0, x6, x8 mov x21, x1 mov x30, x2 mov x24, x4 mov x25, x5 /* walk the restore_pblist and use copy_page() to over-write memory */ mov x19, x3 1: ldr x10, [x19, #HIBERN_PBE_ORIG] mov x0, x10 ldr x1, [x19, #HIBERN_PBE_ADDR] copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9 add x1, x10, #PAGE_SIZE /* Clean the copied page to PoU - based on caches_clean_inval_pou() */ raw_dcache_line_size x2, x3 sub x3, x2, #1 bic x4, x10, x3 2: /* clean D line / unified line */ alternative_insn "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE add x4, x4, x2 cmp x4, x1 b.lo 2b ldr x19, [x19, #HIBERN_PBE_NEXT] cbnz x19, 1b dsb ish /* wait for PoU cleaning to finish */ /* switch to the restored kernels page tables */ break_before_make_ttbr_switch x25, x21, x6, x8 ic ialluis dsb ish isb cbz x24, 3f /* Do we need to re-initialise EL2? */ hvc #0 3: ret SYM_CODE_END(swsusp_arch_suspend_exit) .popsection
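The 1:/2: loop above is the assembly form of walking the hibernate restore list; struct pbe is laid out as in include/linux/suspend.h. A C sketch of the walk, with memcpy standing in for the copy_page macro and a 4 KiB page assumed:

#include <string.h>

#define PAGE_BYTES 4096UL	/* illustrative; the real value comes from asm/page.h */

struct pbe {
	void *address;		/* the saved copy of the page */
	void *orig_address;	/* where the page must be restored */
	struct pbe *next;
};

static void restore_pblist_sketch(struct pbe *restore_pblist)
{
	struct pbe *pbe;

	for (pbe = restore_pblist; pbe; pbe = pbe->next) {
		memcpy(pbe->orig_address, pbe->address, PAGE_BYTES);
		/* the assembly then cleans the destination to PoU */
	}
}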
aixcc-public/challenge-001-exemplar-source
8,847
arch/arm64/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * ld script to make ARM Linux kernel * taken from the i386 version by Russell King * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> */ #include <asm/hyp_image.h> #ifdef CONFIG_KVM #define HYPERVISOR_EXTABLE \ . = ALIGN(SZ_8); \ __start___kvm_ex_table = .; \ *(__kvm_ex_table) \ __stop___kvm_ex_table = .; #define HYPERVISOR_DATA_SECTIONS \ HYP_SECTION_NAME(.rodata) : { \ . = ALIGN(PAGE_SIZE); \ __hyp_rodata_start = .; \ *(HYP_SECTION_NAME(.data..ro_after_init)) \ *(HYP_SECTION_NAME(.rodata)) \ . = ALIGN(PAGE_SIZE); \ __hyp_rodata_end = .; \ } #define HYPERVISOR_PERCPU_SECTION \ . = ALIGN(PAGE_SIZE); \ HYP_SECTION_NAME(.data..percpu) : { \ *(HYP_SECTION_NAME(.data..percpu)) \ } #define HYPERVISOR_RELOC_SECTION \ .hyp.reloc : ALIGN(4) { \ __hyp_reloc_begin = .; \ *(.hyp.reloc) \ __hyp_reloc_end = .; \ } #define BSS_FIRST_SECTIONS \ __hyp_bss_start = .; \ *(HYP_SECTION_NAME(.bss)) \ . = ALIGN(PAGE_SIZE); \ __hyp_bss_end = .; /* * We require that __hyp_bss_start and __bss_start are aligned, and enforce it * with an assertion. But the BSS_SECTION macro places an empty .sbss section * between them, which can in some cases cause the linker to misalign them. To * work around the issue, force a page alignment for __bss_start. */ #define SBSS_ALIGN PAGE_SIZE #else /* CONFIG_KVM */ #define HYPERVISOR_EXTABLE #define HYPERVISOR_DATA_SECTIONS #define HYPERVISOR_PERCPU_SECTION #define HYPERVISOR_RELOC_SECTION #define SBSS_ALIGN 0 #endif #define RO_EXCEPTION_TABLE_ALIGN 4 #define RUNTIME_DISCARD_EXIT #include <asm-generic/vmlinux.lds.h> #include <asm/cache.h> #include <asm/kernel-pgtable.h> #include <asm/kexec.h> #include <asm/memory.h> #include <asm/page.h> #include "image.h" OUTPUT_ARCH(aarch64) ENTRY(_text) jiffies = jiffies_64; #define HYPERVISOR_TEXT \ . = ALIGN(PAGE_SIZE); \ __hyp_idmap_text_start = .; \ *(.hyp.idmap.text) \ __hyp_idmap_text_end = .; \ __hyp_text_start = .; \ *(.hyp.text) \ HYPERVISOR_EXTABLE \ . = ALIGN(PAGE_SIZE); \ __hyp_text_end = .; #define IDMAP_TEXT \ . = ALIGN(SZ_4K); \ __idmap_text_start = .; \ *(.idmap.text) \ __idmap_text_end = .; #ifdef CONFIG_HIBERNATION #define HIBERNATE_TEXT \ __hibernate_exit_text_start = .; \ *(.hibernate_exit.text) \ __hibernate_exit_text_end = .; #else #define HIBERNATE_TEXT #endif #ifdef CONFIG_KEXEC_CORE #define KEXEC_TEXT \ __relocate_new_kernel_start = .; \ *(.kexec_relocate.text) \ __relocate_new_kernel_end = .; #else #define KEXEC_TEXT #endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 #define TRAMP_TEXT \ . = ALIGN(PAGE_SIZE); \ __entry_tramp_text_start = .; \ *(.entry.tramp.text) \ . = ALIGN(PAGE_SIZE); \ __entry_tramp_text_end = .; \ *(.entry.tramp.rodata) #else #define TRAMP_TEXT #endif /* * The size of the PE/COFF section that covers the kernel image, which * runs from _stext to _edata, must be a round multiple of the PE/COFF * FileAlignment, which we set to its minimum value of 0x200. '_stext' * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned * boundary should be sufficient. */ PECOFF_FILE_ALIGNMENT = 0x200; #ifdef CONFIG_EFI #define PECOFF_EDATA_PADDING \ .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); } #else #define PECOFF_EDATA_PADDING #endif SECTIONS { /* * XXX: The linker does not define how output sections are * assigned to input sections when there are multiple statements * matching the same input section name. There is no documented * order of matching. */ DISCARDS /DISCARD/ : { *(.interp .dynamic) *(.dynsym .dynstr .hash .gnu.hash) } . 
= KIMAGE_VADDR; .head.text : { _text = .; HEAD_TEXT } .text : ALIGN(SEGMENT_ALIGN) { /* Real text segment */ _stext = .; /* Text and read-only data */ IRQENTRY_TEXT SOFTIRQENTRY_TEXT ENTRY_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT HYPERVISOR_TEXT IDMAP_TEXT *(.gnu.warning) . = ALIGN(16); *(.got) /* Global offset table */ } /* * Make sure that the .got.plt is either completely empty or it * contains only the lazy dispatch entries. */ .got.plt : { *(.got.plt) } ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18, "Unexpected GOT/PLT entries detected!") . = ALIGN(SEGMENT_ALIGN); _etext = .; /* End of text section */ /* everything from this point to __init_begin will be marked RO NX */ RO_DATA(PAGE_SIZE) HYPERVISOR_DATA_SECTIONS /* code sections that are never executed via the kernel mapping */ .rodata.text : { TRAMP_TEXT HIBERNATE_TEXT KEXEC_TEXT . = ALIGN(PAGE_SIZE); } idmap_pg_dir = .; . += PAGE_SIZE; #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 tramp_pg_dir = .; . += PAGE_SIZE; #endif reserved_pg_dir = .; . += PAGE_SIZE; swapper_pg_dir = .; . += PAGE_SIZE; . = ALIGN(SEGMENT_ALIGN); __init_begin = .; __inittext_begin = .; INIT_TEXT_SECTION(8) __exittext_begin = .; .exit.text : { EXIT_TEXT } __exittext_end = .; . = ALIGN(4); .altinstructions : { __alt_instructions = .; *(.altinstructions) __alt_instructions_end = .; } . = ALIGN(SEGMENT_ALIGN); __inittext_end = .; __initdata_begin = .; init_idmap_pg_dir = .; . += INIT_IDMAP_DIR_SIZE; init_idmap_pg_end = .; .init.data : { INIT_DATA INIT_SETUP(16) INIT_CALLS CON_INITCALL INIT_RAM_FS *(.init.altinstructions .init.bss) /* from the EFI stub */ } .exit.data : { EXIT_DATA } PERCPU_SECTION(L1_CACHE_BYTES) HYPERVISOR_PERCPU_SECTION HYPERVISOR_RELOC_SECTION .rela.dyn : ALIGN(8) { __rela_start = .; *(.rela .rela*) __rela_end = .; } .relr.dyn : ALIGN(8) { __relr_start = .; *(.relr.dyn) __relr_end = .; } . = ALIGN(SEGMENT_ALIGN); __initdata_end = .; __init_end = .; _data = .; _sdata = .; RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN) /* * Data written with the MMU off but read with the MMU on requires * cache lines to be invalidated, discarding up to a Cache Writeback * Granule (CWG) of data from the cache. Keep the section that * requires this type of maintenance to be in its own Cache Writeback * Granule (CWG) area so the cache maintenance operations don't * interfere with adjacent data. */ .mmuoff.data.write : ALIGN(SZ_2K) { __mmuoff_data_start = .; *(.mmuoff.data.write) } . = ALIGN(SZ_2K); .mmuoff.data.read : { *(.mmuoff.data.read) __mmuoff_data_end = .; } PECOFF_EDATA_PADDING __pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin); _edata = .; BSS_SECTION(SBSS_ALIGN, 0, 0) . = ALIGN(PAGE_SIZE); init_pg_dir = .; . += INIT_DIR_SIZE; init_pg_end = .; . = ALIGN(SEGMENT_ALIGN); __pecoff_data_size = ABSOLUTE(. - __initdata_begin); _end = .; STABS_DEBUG DWARF_DEBUG ELF_DETAILS HEAD_SYMBOLS /* * Sections that should stay zero sized, which is safer to * explicitly check instead of blindly discarding. */ .plt : { *(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt) } ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") .data.rel.ro : { *(.data.rel.ro) } ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!") } #include "image-vars.h" /* * The HYP init code and ID map text can't be longer than a page each. The * former is page-aligned, but the latter may not be with 16K or 64K pages, so * it should also not cross a page boundary. 
*/ ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE, "HYP init code too big") ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, "ID map text too big or misaligned") #ifdef CONFIG_HIBERNATION ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K, "Hibernate exit text is bigger than 4 KiB") #endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE, "Entry trampoline text too big") #endif #ifdef CONFIG_KVM ASSERT(__hyp_bss_start == __bss_start, "HYP and Host BSS are misaligned") #endif /* * If padding is applied before .head.text, virt<->phys conversions will fail. */ ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned") ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET, "RESERVED_SWAPPER_OFFSET is wrong!") #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET, "TRAMP_SWAPPER_OFFSET is wrong!") #endif #ifdef CONFIG_KEXEC_CORE /* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */ ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K, "kexec relocation code is bigger than 4 KiB") ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken") #endif
aixcc-public/challenge-001-exemplar-source
4,661
arch/arm64/kernel/sleep.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/errno.h> #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> #include <asm/smp.h> .text /* * Implementation of MPIDR_EL1 hash algorithm through shifting * and OR'ing. * * @dst: register containing hash result * @rs0: register containing affinity level 0 bit shift * @rs1: register containing affinity level 1 bit shift * @rs2: register containing affinity level 2 bit shift * @rs3: register containing affinity level 3 bit shift * @mpidr: register containing MPIDR_EL1 value * @mask: register containing MPIDR mask * * Pseudo C-code: * *u32 dst; * *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) { * u32 aff0, aff1, aff2, aff3; * u64 mpidr_masked = mpidr & mask; * aff0 = mpidr_masked & 0xff; * aff1 = mpidr_masked & 0xff00; * aff2 = mpidr_masked & 0xff0000; * aff3 = mpidr_masked & 0xff00000000; * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3); *} * Input registers: rs0, rs1, rs2, rs3, mpidr, mask * Output register: dst * Note: input and output registers must be disjoint register sets (eg: a macro instance with mpidr = x1 and dst = x1 is invalid) */ .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask and \mpidr, \mpidr, \mask // mask out MPIDR bits and \dst, \mpidr, #0xff // mask=aff0 lsr \dst ,\dst, \rs0 // dst=aff0>>rs0 and \mask, \mpidr, #0xff00 // mask = aff1 lsr \mask ,\mask, \rs1 orr \dst, \dst, \mask // dst|=(aff1>>rs1) and \mask, \mpidr, #0xff0000 // mask = aff2 lsr \mask ,\mask, \rs2 orr \dst, \dst, \mask // dst|=(aff2>>rs2) and \mask, \mpidr, #0xff00000000 // mask = aff3 lsr \mask ,\mask, \rs3 orr \dst, \dst, \mask // dst|=(aff3>>rs3) .endm /* * Save CPU state in the provided sleep_stack_data area, and publish its * location for cpu_resume()'s use in sleep_save_stash. * * cpu_resume() will restore this saved state, and return. Because the * link-register is saved and restored, it will appear to return from this * function. So that the caller can tell the suspend/resume paths apart, * __cpu_suspend_enter() will always return a non-zero value, whereas the * path through cpu_resume() will return 0. * * x0 = struct sleep_stack_data area */ SYM_FUNC_START(__cpu_suspend_enter) stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS] stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16] stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32] stp x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48] stp x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64] stp x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80] /* save the sp in cpu_suspend_ctx */ mov x2, sp str x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP] /* find the mpidr_hash */ ldr_l x1, sleep_save_stash mrs x7, mpidr_el1 adr_l x9, mpidr_hash ldr x10, [x9, #MPIDR_HASH_MASK] /* * Following code relies on the struct mpidr_hash * members size. */ ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 add x1, x1, x8, lsl #3 str x0, [x1] add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS stp x29, lr, [sp, #-16]! 
bl cpu_do_suspend ldp x29, lr, [sp], #16 mov x0, #1 ret SYM_FUNC_END(__cpu_suspend_enter) .pushsection ".idmap.text", "awx" SYM_CODE_START(cpu_resume) bl init_kernel_el bl finalise_el2 #if VA_BITS > 48 ldr_l x0, vabits_actual #endif bl __cpu_setup /* enable the MMU early - so we can access sleep_save_stash by va */ adrp x1, swapper_pg_dir adrp x2, idmap_pg_dir bl __enable_mmu ldr x8, =_cpu_resume br x8 SYM_CODE_END(cpu_resume) .ltorg .popsection SYM_FUNC_START(_cpu_resume) mrs x1, mpidr_el1 adr_l x8, mpidr_hash // x8 = struct mpidr_hash virt address /* retrieve mpidr_hash members to compute the hash */ ldr x2, [x8, #MPIDR_HASH_MASK] ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)] compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2 /* x7 contains hash index, let's use it to grab context pointer */ ldr_l x0, sleep_save_stash ldr x0, [x0, x7, lsl #3] add x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS /* load sp from context */ ldr x2, [x0, #CPU_CTX_SP] mov sp, x2 /* * cpu_do_resume expects x0 to contain context address pointer */ bl cpu_do_resume #if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) mov x0, sp bl kasan_unpoison_task_stack_below #endif ldp x19, x20, [x29, #16] ldp x21, x22, [x29, #32] ldp x23, x24, [x29, #48] ldp x25, x26, [x29, #64] ldp x27, x28, [x29, #80] ldp x29, lr, [x29] mov x0, #0 ret SYM_FUNC_END(_cpu_resume)
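The compute_mpidr_hash macro above is a direct transliteration of the pseudo
C-code in its header comment. As a minimal standalone sketch in plain C (not
part of the kernel sources; the real shift values are derived at boot by
smp_build_mpidr_hash()):

#include <stdint.h>

/* Reference model of the compute_mpidr_hash assembler macro. */
static uint32_t compute_mpidr_hash(uint32_t rs0, uint32_t rs1, uint32_t rs2,
                                   uint32_t rs3, uint64_t mpidr, uint64_t mask)
{
        uint64_t m = mpidr & mask;              /* mask out MPIDR bits */
        uint64_t aff0 = m & 0xff;               /* affinity level 0 */
        uint64_t aff1 = m & 0xff00;             /* affinity level 1 */
        uint64_t aff2 = m & 0xff0000;           /* affinity level 2 */
        uint64_t aff3 = m & 0xff00000000;       /* affinity level 3 */

        return (uint32_t)(aff0 >> rs0 | aff1 >> rs1 |
                          aff2 >> rs2 | aff3 >> rs3);
}

The hash indexes the sleep_save_stash array, which is why the saved context
pointer is stored at [x1 + hash * 8] on suspend and reloaded the same way in
_cpu_resume.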
aixcc-public/challenge-001-exemplar-source
7,321
arch/arm64/kernel/hyp-stub.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Hypervisor stub * * Copyright (C) 2012 ARM Ltd. * Author: Marc Zyngier <marc.zyngier@arm.com> */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/el2_setup.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/ptrace.h> #include <asm/virt.h> // Warning, hardcoded register allocation // This will clobber x1 and x2, and expect x1 to contain // the id register value as read from the HW .macro __check_override idreg, fld, width, pass, fail ubfx x1, x1, #\fld, #\width cbz x1, \fail adr_l x1, \idreg\()_override ldr x2, [x1, FTR_OVR_VAL_OFFSET] ldr x1, [x1, FTR_OVR_MASK_OFFSET] ubfx x2, x2, #\fld, #\width ubfx x1, x1, #\fld, #\width cmp x1, xzr and x2, x2, x1 csinv x2, x2, xzr, ne cbnz x2, \pass b \fail .endm .macro check_override idreg, fld, pass, fail mrs x1, \idreg\()_el1 __check_override \idreg \fld 4 \pass \fail .endm .text .pushsection .hyp.text, "ax" .align 11 SYM_CODE_START(__hyp_stub_vectors) ventry el2_sync_invalid // Synchronous EL2t ventry el2_irq_invalid // IRQ EL2t ventry el2_fiq_invalid // FIQ EL2t ventry el2_error_invalid // Error EL2t ventry elx_sync // Synchronous EL2h ventry el2_irq_invalid // IRQ EL2h ventry el2_fiq_invalid // FIQ EL2h ventry el2_error_invalid // Error EL2h ventry elx_sync // Synchronous 64-bit EL1 ventry el1_irq_invalid // IRQ 64-bit EL1 ventry el1_fiq_invalid // FIQ 64-bit EL1 ventry el1_error_invalid // Error 64-bit EL1 ventry el1_sync_invalid // Synchronous 32-bit EL1 ventry el1_irq_invalid // IRQ 32-bit EL1 ventry el1_fiq_invalid // FIQ 32-bit EL1 ventry el1_error_invalid // Error 32-bit EL1 SYM_CODE_END(__hyp_stub_vectors) .align 11 SYM_CODE_START_LOCAL(elx_sync) cmp x0, #HVC_SET_VECTORS b.ne 1f msr vbar_el2, x1 b 9f 1: cmp x0, #HVC_FINALISE_EL2 b.eq __finalise_el2 2: cmp x0, #HVC_SOFT_RESTART b.ne 3f mov x0, x2 mov x2, x4 mov x4, x1 mov x1, x3 br x4 // no return 3: cmp x0, #HVC_RESET_VECTORS beq 9f // Nothing to reset! /* Someone called kvm_call_hyp() against the hyp-stub... */ mov_q x0, HVC_STUB_ERR eret 9: mov x0, xzr eret SYM_CODE_END(elx_sync) SYM_CODE_START_LOCAL(__finalise_el2) check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve .Linit_sve: /* SVE register access */ mrs x0, cptr_el2 // Disable SVE traps bic x0, x0, #CPTR_EL2_TZ msr cptr_el2, x0 isb mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector msr_s SYS_ZCR_EL2, x1 // length for EL1. .Lskip_sve: check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme .Linit_sme: /* SME register access and priority mapping */ mrs x0, cptr_el2 // Disable SME traps bic x0, x0, #CPTR_EL2_TSM msr cptr_el2, x0 isb mrs x1, sctlr_el2 orr x1, x1, #SCTLR_ELx_ENTP2 // Disable TPIDR2 traps msr sctlr_el2, x1 isb mov x0, #0 // SMCR controls // Full FP in SM? mrs_s x1, SYS_ID_AA64SMFR0_EL1 __check_override id_aa64smfr0 ID_AA64SMFR0_EL1_FA64_SHIFT 1 .Linit_sme_fa64 .Lskip_sme_fa64 .Linit_sme_fa64: orr x0, x0, SMCR_ELx_FA64_MASK .Lskip_sme_fa64: orr x0, x0, #SMCR_ELx_LEN_MASK // Enable full SME vector msr_s SYS_SMCR_EL2, x0 // length for EL1. mrs_s x1, SYS_SMIDR_EL1 // Priority mapping supported? ubfx x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1 cbz x1, .Lskip_sme msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present? ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4 cbz x1, .Lskip_sme mrs_s x1, SYS_HCRX_EL2 orr x1, x1, #HCRX_EL2_SMPME_MASK // Enable priority mapping msr_s SYS_HCRX_EL2, x1 .Lskip_sme: // nVHE? No way! Give me the real thing! 
// Sanity check: MMU *must* be off mrs x1, sctlr_el2 tbnz x1, #0, 1f // Needs to be VHE capable, obviously check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f 1: mov_q x0, HVC_STUB_ERR eret 2: // Engage the VHE magic! mov_q x0, HCR_HOST_VHE_FLAGS msr hcr_el2, x0 isb // Use the EL1 allocated stack, per-cpu offset mrs x0, sp_el1 mov sp, x0 mrs x0, tpidr_el1 msr tpidr_el2, x0 // FP configuration, vectors mrs_s x0, SYS_CPACR_EL12 msr cpacr_el1, x0 mrs_s x0, SYS_VBAR_EL12 msr vbar_el1, x0 // Use EL2 translations for SPE & TRBE and disable access from EL1 mrs x0, mdcr_el2 bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) bic x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT) msr mdcr_el2, x0 // Transfer the MM state from EL1 to EL2 mrs_s x0, SYS_TCR_EL12 msr tcr_el1, x0 mrs_s x0, SYS_TTBR0_EL12 msr ttbr0_el1, x0 mrs_s x0, SYS_TTBR1_EL12 msr ttbr1_el1, x0 mrs_s x0, SYS_MAIR_EL12 msr mair_el1, x0 isb // Hack the exception return to stay at EL2 mrs x0, spsr_el1 and x0, x0, #~PSR_MODE_MASK mov x1, #PSR_MODE_EL2h orr x0, x0, x1 msr spsr_el1, x0 b enter_vhe SYM_CODE_END(__finalise_el2) // At the point where we reach enter_vhe(), we run with // the MMU off (which is enforced by __finalise_el2()). // We thus need to be in the idmap, or everything will // explode when enabling the MMU. .pushsection .idmap.text, "ax" SYM_CODE_START_LOCAL(enter_vhe) // Invalidate TLBs before enabling the MMU tlbi vmalle1 dsb nsh isb // Enable the EL2 S1 MMU, as set up from EL1 mrs_s x0, SYS_SCTLR_EL12 set_sctlr_el1 x0 // Disable the EL1 S1 MMU for a good measure mov_q x0, INIT_SCTLR_EL1_MMU_OFF msr_s SYS_SCTLR_EL12, x0 mov x0, xzr eret SYM_CODE_END(enter_vhe) .popsection .macro invalid_vector label SYM_CODE_START_LOCAL(\label) b \label SYM_CODE_END(\label) .endm invalid_vector el2_sync_invalid invalid_vector el2_irq_invalid invalid_vector el2_fiq_invalid invalid_vector el2_error_invalid invalid_vector el1_sync_invalid invalid_vector el1_irq_invalid invalid_vector el1_fiq_invalid invalid_vector el1_error_invalid .popsection /* * __hyp_set_vectors: Call this after boot to set the initial hypervisor * vectors as part of hypervisor installation. On an SMP system, this should * be called on each CPU. * * x0 must be the physical address of the new vector table, and must be * 2KB aligned. * * Before calling this, you must check that the stub hypervisor is installed * everywhere, by waiting for any secondary CPUs to be brought up and then * checking that is_hyp_mode_available() is true. * * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or * something else went wrong... in such cases, trying to install a new * hypervisor is unlikely to work as desired. * * When you call into your shiny new hypervisor, sp_el2 will contain junk, * so you will need to set that to something sensible at the new hypervisor's * initialisation entry point. */ SYM_FUNC_START(__hyp_set_vectors) mov x1, x0 mov x0, #HVC_SET_VECTORS hvc #0 ret SYM_FUNC_END(__hyp_set_vectors) SYM_FUNC_START(__hyp_reset_vectors) mov x0, #HVC_RESET_VECTORS hvc #0 ret SYM_FUNC_END(__hyp_reset_vectors) /* * Entry point to finalise EL2 and switch to VHE if deemed capable * * w0: boot mode, as returned by init_kernel_el() */ SYM_FUNC_START(finalise_el2) // Need to have booted at EL2 cmp w0, #BOOT_CPU_MODE_EL2 b.ne 1f // and still be at EL1 mrs x0, CurrentEL cmp x0, #CurrentEL_EL1 b.ne 1f mov x0, #HVC_FINALISE_EL2 hvc #0 1: ret SYM_FUNC_END(finalise_el2)
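The __check_override macro at the top of hyp-stub.S packs its decision into a
csinv: when the override mask field is zero (no override registered for that
field), csinv produces all-ones, so the feature is kept. A hedged C model of
that logic (helper names here are hypothetical, for illustration only):

#include <stdbool.h>
#include <stdint.h>

/* Field extract, mirroring the ubfx instruction. */
static uint64_t ubfx64(uint64_t reg, unsigned int shift, unsigned int width)
{
        return (reg >> shift) & ((1ULL << width) - 1);
}

static bool check_override(uint64_t idreg, uint64_t ovr_val,
                           uint64_t ovr_mask, unsigned int shift,
                           unsigned int width)
{
        if (ubfx64(idreg, shift, width) == 0)
                return false;           /* hardware lacks the feature */

        uint64_t mask = ubfx64(ovr_mask, shift, width);
        uint64_t val  = ubfx64(ovr_val, shift, width);

        /* no override for this field: keep the feature (csinv -> ~0) */
        return mask == 0 || (val & mask) != 0;
}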
aixcc-public/challenge-001-exemplar-source
6,630
arch/arm64/crypto/sha512-ce-core.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * sha512-ce-core.S - core SHA-384/SHA-512 transform using v8 Crypto Extensions * * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 .set .Lq\b, \b .set .Lv\b\().2d, \b .endr .macro sha512h, rd, rn, rm .inst 0xce608000 | .L\rd | (.L\rn << 5) | (.L\rm << 16) .endm .macro sha512h2, rd, rn, rm .inst 0xce608400 | .L\rd | (.L\rn << 5) | (.L\rm << 16) .endm .macro sha512su0, rd, rn .inst 0xcec08000 | .L\rd | (.L\rn << 5) .endm .macro sha512su1, rd, rn, rm .inst 0xce608800 | .L\rd | (.L\rn << 5) | (.L\rm << 16) .endm /* * The SHA-512 round constants */ .section ".rodata", "a" .align 4 .Lsha512_rcon: .quad 0x428a2f98d728ae22, 0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc .quad 0x3956c25bf348b538, 0x59f111f1b605d019 .quad 0x923f82a4af194f9b, 0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242, 0x12835b0145706fbe .quad 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f, 0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235, 0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2, 0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275, 0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5 .quad 0x983e5152ee66dfab, 0xa831c66d2db43210 .quad 0xb00327c898fb213f, 0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2, 0xd5a79147930aa725 .quad 0x06ca6351e003826f, 0x142929670a0e6e70 .quad 0x27b70a8546d22ffc, 0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df .quad 0x650a73548baf63de, 0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6, 0x92722c851482353b .quad 0xa2bfe8a14cf10364, 0xa81a664bbc423001 .quad 0xc24b8b70d0f89791, 0xc76c51a30654be30 .quad 0xd192e819d6ef5218, 0xd69906245565a910 .quad 0xf40e35855771202a, 0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8, 0x1e376c085141ab53 .quad 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc, 0x78a5636f43172f60 .quad 0x84c87814a1f0ab72, 0x8cc702081a6439ec .quad 0x90befffa23631e28, 0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915, 0xc67178f2e372532b .quad 0xca273eceea26619c, 0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba, 0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae, 0x1b710b35131c471b .quad 0x28db77f523047d84, 0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec, 0x6c44198c4a475817 .macro dround, i0, i1, i2, i3, i4, rc0, rc1, in0, in1, in2, in3, in4 .ifnb \rc1 ld1 {v\rc1\().2d}, [x4], #16 .endif add v5.2d, v\rc0\().2d, v\in0\().2d ext v6.16b, v\i2\().16b, v\i3\().16b, #8 ext v5.16b, v5.16b, v5.16b, #8 ext v7.16b, v\i1\().16b, v\i2\().16b, #8 add v\i3\().2d, v\i3\().2d, v5.2d .ifnb \in1 ext v5.16b, v\in3\().16b, v\in4\().16b, #8 sha512su0 v\in0\().2d, v\in1\().2d .endif sha512h q\i3, q6, v7.2d .ifnb \in1 sha512su1 v\in0\().2d, v\in2\().2d, v5.2d .endif add v\i4\().2d, v\i1\().2d, v\i3\().2d sha512h2 q\i3, q\i1, v\i0\().2d .endm /* * void sha512_ce_transform(struct sha512_state *sst, u8 const *src, * int blocks) */ .text SYM_FUNC_START(sha512_ce_transform) /* load state */ ld1 {v8.2d-v11.2d}, [x0] /* load first 4 round constants */ adr_l x3, .Lsha512_rcon ld1 
{v20.2d-v23.2d}, [x3], #64 /* load input */ 0: ld1 {v12.2d-v15.2d}, [x1], #64 ld1 {v16.2d-v19.2d}, [x1], #64 sub w2, w2, #1 CPU_LE( rev64 v12.16b, v12.16b ) CPU_LE( rev64 v13.16b, v13.16b ) CPU_LE( rev64 v14.16b, v14.16b ) CPU_LE( rev64 v15.16b, v15.16b ) CPU_LE( rev64 v16.16b, v16.16b ) CPU_LE( rev64 v17.16b, v17.16b ) CPU_LE( rev64 v18.16b, v18.16b ) CPU_LE( rev64 v19.16b, v19.16b ) mov x4, x3 // rc pointer mov v0.16b, v8.16b mov v1.16b, v9.16b mov v2.16b, v10.16b mov v3.16b, v11.16b // v0 ab cd -- ef gh ab // v1 cd -- ef gh ab cd // v2 ef gh ab cd -- ef // v3 gh ab cd -- ef gh // v4 -- ef gh ab cd -- dround 0, 1, 2, 3, 4, 20, 24, 12, 13, 19, 16, 17 dround 3, 0, 4, 2, 1, 21, 25, 13, 14, 12, 17, 18 dround 2, 3, 1, 4, 0, 22, 26, 14, 15, 13, 18, 19 dround 4, 2, 0, 1, 3, 23, 27, 15, 16, 14, 19, 12 dround 1, 4, 3, 0, 2, 24, 28, 16, 17, 15, 12, 13 dround 0, 1, 2, 3, 4, 25, 29, 17, 18, 16, 13, 14 dround 3, 0, 4, 2, 1, 26, 30, 18, 19, 17, 14, 15 dround 2, 3, 1, 4, 0, 27, 31, 19, 12, 18, 15, 16 dround 4, 2, 0, 1, 3, 28, 24, 12, 13, 19, 16, 17 dround 1, 4, 3, 0, 2, 29, 25, 13, 14, 12, 17, 18 dround 0, 1, 2, 3, 4, 30, 26, 14, 15, 13, 18, 19 dround 3, 0, 4, 2, 1, 31, 27, 15, 16, 14, 19, 12 dround 2, 3, 1, 4, 0, 24, 28, 16, 17, 15, 12, 13 dround 4, 2, 0, 1, 3, 25, 29, 17, 18, 16, 13, 14 dround 1, 4, 3, 0, 2, 26, 30, 18, 19, 17, 14, 15 dround 0, 1, 2, 3, 4, 27, 31, 19, 12, 18, 15, 16 dround 3, 0, 4, 2, 1, 28, 24, 12, 13, 19, 16, 17 dround 2, 3, 1, 4, 0, 29, 25, 13, 14, 12, 17, 18 dround 4, 2, 0, 1, 3, 30, 26, 14, 15, 13, 18, 19 dround 1, 4, 3, 0, 2, 31, 27, 15, 16, 14, 19, 12 dround 0, 1, 2, 3, 4, 24, 28, 16, 17, 15, 12, 13 dround 3, 0, 4, 2, 1, 25, 29, 17, 18, 16, 13, 14 dround 2, 3, 1, 4, 0, 26, 30, 18, 19, 17, 14, 15 dround 4, 2, 0, 1, 3, 27, 31, 19, 12, 18, 15, 16 dround 1, 4, 3, 0, 2, 28, 24, 12, 13, 19, 16, 17 dround 0, 1, 2, 3, 4, 29, 25, 13, 14, 12, 17, 18 dround 3, 0, 4, 2, 1, 30, 26, 14, 15, 13, 18, 19 dround 2, 3, 1, 4, 0, 31, 27, 15, 16, 14, 19, 12 dround 4, 2, 0, 1, 3, 24, 28, 16, 17, 15, 12, 13 dround 1, 4, 3, 0, 2, 25, 29, 17, 18, 16, 13, 14 dround 0, 1, 2, 3, 4, 26, 30, 18, 19, 17, 14, 15 dround 3, 0, 4, 2, 1, 27, 31, 19, 12, 18, 15, 16 dround 2, 3, 1, 4, 0, 28, 24, 12 dround 4, 2, 0, 1, 3, 29, 25, 13 dround 1, 4, 3, 0, 2, 30, 26, 14 dround 0, 1, 2, 3, 4, 31, 27, 15 dround 3, 0, 4, 2, 1, 24, , 16 dround 2, 3, 1, 4, 0, 25, , 17 dround 4, 2, 0, 1, 3, 26, , 18 dround 1, 4, 3, 0, 2, 27, , 19 /* update state */ add v8.2d, v8.2d, v0.2d add v9.2d, v9.2d, v1.2d add v10.2d, v10.2d, v2.2d add v11.2d, v11.2d, v3.2d cond_yield 3f, x4, x5 /* handled all input blocks? */ cbnz w2, 0b /* store new state */ 3: st1 {v8.2d-v11.2d}, [x0] mov w0, w2 ret SYM_FUNC_END(sha512_ce_transform)
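Each of the 40 dround instances above retires two of SHA-512's 80 rounds via
the fused sha512h/sha512h2 instructions. For reference, one scalar round as
defined by FIPS 180-4, in plain C (illustrative only, not the kernel's code):

#include <stdint.h>

static uint64_t ror64(uint64_t x, unsigned int n)
{
        return (x >> n) | (x << (64 - n));
}

/*
 * One scalar SHA-512 round. s[0..7] hold a..h, K is the round constant
 * from .Lsha512_rcon, W the message schedule word.
 */
static void sha512_round(uint64_t s[8], uint64_t K, uint64_t W)
{
        uint64_t S1  = ror64(s[4], 14) ^ ror64(s[4], 18) ^ ror64(s[4], 41);
        uint64_t ch  = (s[4] & s[5]) ^ (~s[4] & s[6]);
        uint64_t t1  = s[7] + S1 + ch + K + W;
        uint64_t S0  = ror64(s[0], 28) ^ ror64(s[0], 34) ^ ror64(s[0], 39);
        uint64_t maj = (s[0] & s[1]) ^ (s[0] & s[2]) ^ (s[1] & s[2]);
        uint64_t t2  = S0 + maj;

        s[7] = s[6]; s[6] = s[5]; s[5] = s[4];
        s[4] = s[3] + t1;
        s[3] = s[2]; s[2] = s[1]; s[1] = s[0];
        s[0] = t1 + t2;
}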
aixcc-public/challenge-001-exemplar-source
15,378
arch/arm64/crypto/sm4-ce-core.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * SM4 Cipher Algorithm for ARMv8 with Crypto Extensions * as specified in * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html * * Copyright (C) 2022, Alibaba Group. * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ #include <linux/linkage.h> #include <asm/assembler.h> .arch armv8-a+crypto .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 16, 20, 24, 25, 26, 27, 28, 29, 30, 31 .set .Lv\b\().4s, \b .endr .macro sm4e, vd, vn .inst 0xcec08400 | (.L\vn << 5) | .L\vd .endm .macro sm4ekey, vd, vn, vm .inst 0xce60c800 | (.L\vm << 16) | (.L\vn << 5) | .L\vd .endm /* Register macros */ #define RTMP0 v16 #define RTMP1 v17 #define RTMP2 v18 #define RTMP3 v19 #define RIV v20 /* Helper macros. */ #define PREPARE \ ld1 {v24.16b-v27.16b}, [x0], #64; \ ld1 {v28.16b-v31.16b}, [x0]; #define SM4_CRYPT_BLK(b0) \ rev32 b0.16b, b0.16b; \ sm4e b0.4s, v24.4s; \ sm4e b0.4s, v25.4s; \ sm4e b0.4s, v26.4s; \ sm4e b0.4s, v27.4s; \ sm4e b0.4s, v28.4s; \ sm4e b0.4s, v29.4s; \ sm4e b0.4s, v30.4s; \ sm4e b0.4s, v31.4s; \ rev64 b0.4s, b0.4s; \ ext b0.16b, b0.16b, b0.16b, #8; \ rev32 b0.16b, b0.16b; #define SM4_CRYPT_BLK4(b0, b1, b2, b3) \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ rev32 b3.16b, b3.16b; \ sm4e b0.4s, v24.4s; \ sm4e b1.4s, v24.4s; \ sm4e b2.4s, v24.4s; \ sm4e b3.4s, v24.4s; \ sm4e b0.4s, v25.4s; \ sm4e b1.4s, v25.4s; \ sm4e b2.4s, v25.4s; \ sm4e b3.4s, v25.4s; \ sm4e b0.4s, v26.4s; \ sm4e b1.4s, v26.4s; \ sm4e b2.4s, v26.4s; \ sm4e b3.4s, v26.4s; \ sm4e b0.4s, v27.4s; \ sm4e b1.4s, v27.4s; \ sm4e b2.4s, v27.4s; \ sm4e b3.4s, v27.4s; \ sm4e b0.4s, v28.4s; \ sm4e b1.4s, v28.4s; \ sm4e b2.4s, v28.4s; \ sm4e b3.4s, v28.4s; \ sm4e b0.4s, v29.4s; \ sm4e b1.4s, v29.4s; \ sm4e b2.4s, v29.4s; \ sm4e b3.4s, v29.4s; \ sm4e b0.4s, v30.4s; \ sm4e b1.4s, v30.4s; \ sm4e b2.4s, v30.4s; \ sm4e b3.4s, v30.4s; \ sm4e b0.4s, v31.4s; \ sm4e b1.4s, v31.4s; \ sm4e b2.4s, v31.4s; \ sm4e b3.4s, v31.4s; \ rev64 b0.4s, b0.4s; \ rev64 b1.4s, b1.4s; \ rev64 b2.4s, b2.4s; \ rev64 b3.4s, b3.4s; \ ext b0.16b, b0.16b, b0.16b, #8; \ ext b1.16b, b1.16b, b1.16b, #8; \ ext b2.16b, b2.16b, b2.16b, #8; \ ext b3.16b, b3.16b, b3.16b, #8; \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ rev32 b3.16b, b3.16b; #define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ rev32 b3.16b, b3.16b; \ rev32 b4.16b, b4.16b; \ rev32 b5.16b, b5.16b; \ rev32 b6.16b, b6.16b; \ rev32 b7.16b, b7.16b; \ sm4e b0.4s, v24.4s; \ sm4e b1.4s, v24.4s; \ sm4e b2.4s, v24.4s; \ sm4e b3.4s, v24.4s; \ sm4e b4.4s, v24.4s; \ sm4e b5.4s, v24.4s; \ sm4e b6.4s, v24.4s; \ sm4e b7.4s, v24.4s; \ sm4e b0.4s, v25.4s; \ sm4e b1.4s, v25.4s; \ sm4e b2.4s, v25.4s; \ sm4e b3.4s, v25.4s; \ sm4e b4.4s, v25.4s; \ sm4e b5.4s, v25.4s; \ sm4e b6.4s, v25.4s; \ sm4e b7.4s, v25.4s; \ sm4e b0.4s, v26.4s; \ sm4e b1.4s, v26.4s; \ sm4e b2.4s, v26.4s; \ sm4e b3.4s, v26.4s; \ sm4e b4.4s, v26.4s; \ sm4e b5.4s, v26.4s; \ sm4e b6.4s, v26.4s; \ sm4e b7.4s, v26.4s; \ sm4e b0.4s, v27.4s; \ sm4e b1.4s, v27.4s; \ sm4e b2.4s, v27.4s; \ sm4e b3.4s, v27.4s; \ sm4e b4.4s, v27.4s; \ sm4e b5.4s, v27.4s; \ sm4e b6.4s, v27.4s; \ sm4e b7.4s, v27.4s; \ sm4e b0.4s, v28.4s; \ sm4e b1.4s, v28.4s; \ sm4e b2.4s, v28.4s; \ sm4e b3.4s, v28.4s; \ sm4e b4.4s, v28.4s; \ sm4e b5.4s, v28.4s; \ sm4e b6.4s, v28.4s; \ sm4e b7.4s, v28.4s; \ sm4e b0.4s, v29.4s; \ sm4e b1.4s, v29.4s; \ sm4e b2.4s, v29.4s; \ sm4e b3.4s, v29.4s; \ sm4e b4.4s, v29.4s; \ sm4e b5.4s, 
v29.4s; \ sm4e b6.4s, v29.4s; \ sm4e b7.4s, v29.4s; \ sm4e b0.4s, v30.4s; \ sm4e b1.4s, v30.4s; \ sm4e b2.4s, v30.4s; \ sm4e b3.4s, v30.4s; \ sm4e b4.4s, v30.4s; \ sm4e b5.4s, v30.4s; \ sm4e b6.4s, v30.4s; \ sm4e b7.4s, v30.4s; \ sm4e b0.4s, v31.4s; \ sm4e b1.4s, v31.4s; \ sm4e b2.4s, v31.4s; \ sm4e b3.4s, v31.4s; \ sm4e b4.4s, v31.4s; \ sm4e b5.4s, v31.4s; \ sm4e b6.4s, v31.4s; \ sm4e b7.4s, v31.4s; \ rev64 b0.4s, b0.4s; \ rev64 b1.4s, b1.4s; \ rev64 b2.4s, b2.4s; \ rev64 b3.4s, b3.4s; \ rev64 b4.4s, b4.4s; \ rev64 b5.4s, b5.4s; \ rev64 b6.4s, b6.4s; \ rev64 b7.4s, b7.4s; \ ext b0.16b, b0.16b, b0.16b, #8; \ ext b1.16b, b1.16b, b1.16b, #8; \ ext b2.16b, b2.16b, b2.16b, #8; \ ext b3.16b, b3.16b, b3.16b, #8; \ ext b4.16b, b4.16b, b4.16b, #8; \ ext b5.16b, b5.16b, b5.16b, #8; \ ext b6.16b, b6.16b, b6.16b, #8; \ ext b7.16b, b7.16b, b7.16b, #8; \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ rev32 b3.16b, b3.16b; \ rev32 b4.16b, b4.16b; \ rev32 b5.16b, b5.16b; \ rev32 b6.16b, b6.16b; \ rev32 b7.16b, b7.16b; .align 3 SYM_FUNC_START(sm4_ce_expand_key) /* input: * x0: 128-bit key * x1: rkey_enc * x2: rkey_dec * x3: fk array * x4: ck array */ ld1 {v0.16b}, [x0]; rev32 v0.16b, v0.16b; ld1 {v1.16b}, [x3]; /* load ck */ ld1 {v24.16b-v27.16b}, [x4], #64; ld1 {v28.16b-v31.16b}, [x4]; /* input ^ fk */ eor v0.16b, v0.16b, v1.16b; sm4ekey v0.4s, v0.4s, v24.4s; sm4ekey v1.4s, v0.4s, v25.4s; sm4ekey v2.4s, v1.4s, v26.4s; sm4ekey v3.4s, v2.4s, v27.4s; sm4ekey v4.4s, v3.4s, v28.4s; sm4ekey v5.4s, v4.4s, v29.4s; sm4ekey v6.4s, v5.4s, v30.4s; sm4ekey v7.4s, v6.4s, v31.4s; st1 {v0.16b-v3.16b}, [x1], #64; st1 {v4.16b-v7.16b}, [x1]; rev64 v7.4s, v7.4s; rev64 v6.4s, v6.4s; rev64 v5.4s, v5.4s; rev64 v4.4s, v4.4s; rev64 v3.4s, v3.4s; rev64 v2.4s, v2.4s; rev64 v1.4s, v1.4s; rev64 v0.4s, v0.4s; ext v7.16b, v7.16b, v7.16b, #8; ext v6.16b, v6.16b, v6.16b, #8; ext v5.16b, v5.16b, v5.16b, #8; ext v4.16b, v4.16b, v4.16b, #8; ext v3.16b, v3.16b, v3.16b, #8; ext v2.16b, v2.16b, v2.16b, #8; ext v1.16b, v1.16b, v1.16b, #8; ext v0.16b, v0.16b, v0.16b, #8; st1 {v7.16b}, [x2], #16; st1 {v6.16b}, [x2], #16; st1 {v5.16b}, [x2], #16; st1 {v4.16b}, [x2], #16; st1 {v3.16b}, [x2], #16; st1 {v2.16b}, [x2], #16; st1 {v1.16b}, [x2], #16; st1 {v0.16b}, [x2]; ret; SYM_FUNC_END(sm4_ce_expand_key) .align 3 SYM_FUNC_START(sm4_ce_crypt_block) /* input: * x0: round key array, CTX * x1: dst * x2: src */ PREPARE; ld1 {v0.16b}, [x2]; SM4_CRYPT_BLK(v0); st1 {v0.16b}, [x1]; ret; SYM_FUNC_END(sm4_ce_crypt_block) .align 3 SYM_FUNC_START(sm4_ce_crypt) /* input: * x0: round key array, CTX * x1: dst * x2: src * w3: nblocks */ PREPARE; .Lcrypt_loop_blk: sub w3, w3, #8; tbnz w3, #31, .Lcrypt_tail8; ld1 {v0.16b-v3.16b}, [x2], #64; ld1 {v4.16b-v7.16b}, [x2], #64; SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); st1 {v0.16b-v3.16b}, [x1], #64; st1 {v4.16b-v7.16b}, [x1], #64; cbz w3, .Lcrypt_end; b .Lcrypt_loop_blk; .Lcrypt_tail8: add w3, w3, #8; cmp w3, #4; blt .Lcrypt_tail4; sub w3, w3, #4; ld1 {v0.16b-v3.16b}, [x2], #64; SM4_CRYPT_BLK4(v0, v1, v2, v3); st1 {v0.16b-v3.16b}, [x1], #64; cbz w3, .Lcrypt_end; .Lcrypt_tail4: sub w3, w3, #1; ld1 {v0.16b}, [x2], #16; SM4_CRYPT_BLK(v0); st1 {v0.16b}, [x1], #16; cbnz w3, .Lcrypt_tail4; .Lcrypt_end: ret; SYM_FUNC_END(sm4_ce_crypt) .align 3 SYM_FUNC_START(sm4_ce_cbc_enc) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: iv (big endian, 128 bit) * w4: nblocks */ PREPARE; ld1 {RIV.16b}, [x3]; .Lcbc_enc_loop: sub w4, w4, #1; ld1 {RTMP0.16b}, [x2], #16; eor RIV.16b, RIV.16b, RTMP0.16b; 
SM4_CRYPT_BLK(RIV); st1 {RIV.16b}, [x1], #16; cbnz w4, .Lcbc_enc_loop; /* store new IV */ st1 {RIV.16b}, [x3]; ret; SYM_FUNC_END(sm4_ce_cbc_enc) .align 3 SYM_FUNC_START(sm4_ce_cbc_dec) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: iv (big endian, 128 bit) * w4: nblocks */ PREPARE; ld1 {RIV.16b}, [x3]; .Lcbc_loop_blk: sub w4, w4, #8; tbnz w4, #31, .Lcbc_tail8; ld1 {v0.16b-v3.16b}, [x2], #64; ld1 {v4.16b-v7.16b}, [x2]; SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); sub x2, x2, #64; eor v0.16b, v0.16b, RIV.16b; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v1.16b, v1.16b, RTMP0.16b; eor v2.16b, v2.16b, RTMP1.16b; eor v3.16b, v3.16b, RTMP2.16b; st1 {v0.16b-v3.16b}, [x1], #64; eor v4.16b, v4.16b, RTMP3.16b; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v5.16b, v5.16b, RTMP0.16b; eor v6.16b, v6.16b, RTMP1.16b; eor v7.16b, v7.16b, RTMP2.16b; mov RIV.16b, RTMP3.16b; st1 {v4.16b-v7.16b}, [x1], #64; cbz w4, .Lcbc_end; b .Lcbc_loop_blk; .Lcbc_tail8: add w4, w4, #8; cmp w4, #4; blt .Lcbc_tail4; sub w4, w4, #4; ld1 {v0.16b-v3.16b}, [x2]; SM4_CRYPT_BLK4(v0, v1, v2, v3); eor v0.16b, v0.16b, RIV.16b; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v1.16b, v1.16b, RTMP0.16b; eor v2.16b, v2.16b, RTMP1.16b; eor v3.16b, v3.16b, RTMP2.16b; mov RIV.16b, RTMP3.16b; st1 {v0.16b-v3.16b}, [x1], #64; cbz w4, .Lcbc_end; .Lcbc_tail4: sub w4, w4, #1; ld1 {v0.16b}, [x2]; SM4_CRYPT_BLK(v0); eor v0.16b, v0.16b, RIV.16b; ld1 {RIV.16b}, [x2], #16; st1 {v0.16b}, [x1], #16; cbnz w4, .Lcbc_tail4; .Lcbc_end: /* store new IV */ st1 {RIV.16b}, [x3]; ret; SYM_FUNC_END(sm4_ce_cbc_dec) .align 3 SYM_FUNC_START(sm4_ce_cfb_enc) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: iv (big endian, 128 bit) * w4: nblocks */ PREPARE; ld1 {RIV.16b}, [x3]; .Lcfb_enc_loop: sub w4, w4, #1; SM4_CRYPT_BLK(RIV); ld1 {RTMP0.16b}, [x2], #16; eor RIV.16b, RIV.16b, RTMP0.16b; st1 {RIV.16b}, [x1], #16; cbnz w4, .Lcfb_enc_loop; /* store new IV */ st1 {RIV.16b}, [x3]; ret; SYM_FUNC_END(sm4_ce_cfb_enc) .align 3 SYM_FUNC_START(sm4_ce_cfb_dec) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: iv (big endian, 128 bit) * w4: nblocks */ PREPARE; ld1 {v0.16b}, [x3]; .Lcfb_loop_blk: sub w4, w4, #8; tbnz w4, #31, .Lcfb_tail8; ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48; ld1 {v4.16b-v7.16b}, [x2]; SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); sub x2, x2, #48; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v0.16b, v0.16b, RTMP0.16b; eor v1.16b, v1.16b, RTMP1.16b; eor v2.16b, v2.16b, RTMP2.16b; eor v3.16b, v3.16b, RTMP3.16b; st1 {v0.16b-v3.16b}, [x1], #64; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v4.16b, v4.16b, RTMP0.16b; eor v5.16b, v5.16b, RTMP1.16b; eor v6.16b, v6.16b, RTMP2.16b; eor v7.16b, v7.16b, RTMP3.16b; st1 {v4.16b-v7.16b}, [x1], #64; mov v0.16b, RTMP3.16b; cbz w4, .Lcfb_end; b .Lcfb_loop_blk; .Lcfb_tail8: add w4, w4, #8; cmp w4, #4; blt .Lcfb_tail4; sub w4, w4, #4; ld1 {v1.16b, v2.16b, v3.16b}, [x2]; SM4_CRYPT_BLK4(v0, v1, v2, v3); ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v0.16b, v0.16b, RTMP0.16b; eor v1.16b, v1.16b, RTMP1.16b; eor v2.16b, v2.16b, RTMP2.16b; eor v3.16b, v3.16b, RTMP3.16b; st1 {v0.16b-v3.16b}, [x1], #64; mov v0.16b, RTMP3.16b; cbz w4, .Lcfb_end; .Lcfb_tail4: sub w4, w4, #1; SM4_CRYPT_BLK(v0); ld1 {RTMP0.16b}, [x2], #16; eor v0.16b, v0.16b, RTMP0.16b; st1 {v0.16b}, [x1], #16; mov v0.16b, RTMP0.16b; cbnz w4, .Lcfb_tail4; .Lcfb_end: /* store new IV */ st1 {v0.16b}, [x3]; ret; SYM_FUNC_END(sm4_ce_cfb_dec) .align 3 SYM_FUNC_START(sm4_ce_ctr_enc) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: ctr (big 
endian, 128 bit) * w4: nblocks */ PREPARE; ldp x7, x8, [x3]; rev x7, x7; rev x8, x8; .Lctr_loop_blk: sub w4, w4, #8; tbnz w4, #31, .Lctr_tail8; #define inc_le128(vctr) \ mov vctr.d[1], x8; \ mov vctr.d[0], x7; \ adds x8, x8, #1; \ adc x7, x7, xzr; \ rev64 vctr.16b, vctr.16b; /* construct CTRs */ inc_le128(v0); /* +0 */ inc_le128(v1); /* +1 */ inc_le128(v2); /* +2 */ inc_le128(v3); /* +3 */ inc_le128(v4); /* +4 */ inc_le128(v5); /* +5 */ inc_le128(v6); /* +6 */ inc_le128(v7); /* +7 */ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v0.16b, v0.16b, RTMP0.16b; eor v1.16b, v1.16b, RTMP1.16b; eor v2.16b, v2.16b, RTMP2.16b; eor v3.16b, v3.16b, RTMP3.16b; st1 {v0.16b-v3.16b}, [x1], #64; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v4.16b, v4.16b, RTMP0.16b; eor v5.16b, v5.16b, RTMP1.16b; eor v6.16b, v6.16b, RTMP2.16b; eor v7.16b, v7.16b, RTMP3.16b; st1 {v4.16b-v7.16b}, [x1], #64; cbz w4, .Lctr_end; b .Lctr_loop_blk; .Lctr_tail8: add w4, w4, #8; cmp w4, #4; blt .Lctr_tail4; sub w4, w4, #4; /* construct CTRs */ inc_le128(v0); /* +0 */ inc_le128(v1); /* +1 */ inc_le128(v2); /* +2 */ inc_le128(v3); /* +3 */ SM4_CRYPT_BLK4(v0, v1, v2, v3); ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v0.16b, v0.16b, RTMP0.16b; eor v1.16b, v1.16b, RTMP1.16b; eor v2.16b, v2.16b, RTMP2.16b; eor v3.16b, v3.16b, RTMP3.16b; st1 {v0.16b-v3.16b}, [x1], #64; cbz w4, .Lctr_end; .Lctr_tail4: sub w4, w4, #1; /* construct CTRs */ inc_le128(v0); SM4_CRYPT_BLK(v0); ld1 {RTMP0.16b}, [x2], #16; eor v0.16b, v0.16b, RTMP0.16b; st1 {v0.16b}, [x1], #16; cbnz w4, .Lctr_tail4; .Lctr_end: /* store new CTR */ rev x7, x7; rev x8, x8; stp x7, x8, [x3]; ret; SYM_FUNC_END(sm4_ce_ctr_enc)
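The counter handling in sm4_ce_ctr_enc keeps the big-endian 128-bit CTR as
two host-order words (ldp + rev on entry, rev + stp on exit) and increments
with an adds/adc pair per block in inc_le128. A minimal C model, assuming a
little-endian host like the CPU_LE kernel builds:

#include <stdint.h>
#include <string.h>

/*
 * Emit one big-endian counter block from the host-order halves (hi, lo),
 * then advance lo with carry into hi - the adds/adc pair in inc_le128.
 */
static void ctr128_next_block(uint64_t *hi, uint64_t *lo, uint8_t block[16])
{
        uint64_t be_hi = __builtin_bswap64(*hi);        /* rev64, per lane */
        uint64_t be_lo = __builtin_bswap64(*lo);

        memcpy(block, &be_hi, 8);
        memcpy(block + 8, &be_lo, 8);

        if (++*lo == 0)         /* adds x8, x8, #1 */
                ++*hi;          /* adc  x7, x7, xzr */
}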
aixcc-public/challenge-001-exemplar-source
3,189
arch/arm64/crypto/sha1-ce-core.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions
 *
 * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.arch		armv8-a+crypto

	k0		.req	v0
	k1		.req	v1
	k2		.req	v2
	k3		.req	v3

	t0		.req	v4
	t1		.req	v5

	dga		.req	q6
	dgav		.req	v6
	dgb		.req	s7
	dgbv		.req	v7

	dg0q		.req	q12
	dg0s		.req	s12
	dg0v		.req	v12
	dg1s		.req	s13
	dg1v		.req	v13
	dg2s		.req	s14

	.macro		add_only, op, ev, rc, s0, dg1
	.ifc		\ev, ev
	add		t1.4s, v\s0\().4s, \rc\().4s
	sha1h		dg2s, dg0s
	.ifnb		\dg1
	sha1\op		dg0q, \dg1, t0.4s
	.else
	sha1\op		dg0q, dg1s, t0.4s
	.endif
	.else
	.ifnb		\s0
	add		t0.4s, v\s0\().4s, \rc\().4s
	.endif
	sha1h		dg1s, dg0s
	sha1\op		dg0q, dg2s, t1.4s
	.endif
	.endm

	.macro		add_update, op, ev, rc, s0, s1, s2, s3, dg1
	sha1su0		v\s0\().4s, v\s1\().4s, v\s2\().4s
	add_only	\op, \ev, \rc, \s1, \dg1
	sha1su1		v\s0\().4s, v\s3\().4s
	.endm

	.macro		loadrc, k, val, tmp
	movz		\tmp, :abs_g0_nc:\val
	movk		\tmp, :abs_g1:\val
	dup		\k, \tmp
	.endm

	/*
	 * int sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
	 *			 int blocks)
	 */
SYM_FUNC_START(sha1_ce_transform)
	/* load round constants */
	loadrc		k0.4s, 0x5a827999, w6
	loadrc		k1.4s, 0x6ed9eba1, w6
	loadrc		k2.4s, 0x8f1bbcdc, w6
	loadrc		k3.4s, 0xca62c1d6, w6

	/* load state */
	ld1		{dgav.4s}, [x0]
	ldr		dgb, [x0, #16]

	/* load sha1_ce_state::finalize */
	ldr_l		w4, sha1_ce_offsetof_finalize, x4
	ldr		w4, [x0, x4]

	/* load input */
0:	ld1		{v8.4s-v11.4s}, [x1], #64
	sub		w2, w2, #1

CPU_LE(	rev32		v8.16b, v8.16b		)
CPU_LE(	rev32		v9.16b, v9.16b		)
CPU_LE(	rev32		v10.16b, v10.16b	)
CPU_LE(	rev32		v11.16b, v11.16b	)

1:	add		t0.4s, v8.4s, k0.4s
	mov		dg0v.16b, dgav.16b

	add_update	c, ev, k0, 8, 9, 10, 11, dgb
	add_update	c, od, k0, 9, 10, 11, 8
	add_update	c, ev, k0, 10, 11, 8, 9
	add_update	c, od, k0, 11, 8, 9, 10
	add_update	c, ev, k1, 8, 9, 10, 11

	add_update	p, od, k1, 9, 10, 11, 8
	add_update	p, ev, k1, 10, 11, 8, 9
	add_update	p, od, k1, 11, 8, 9, 10
	add_update	p, ev, k1, 8, 9, 10, 11
	add_update	p, od, k2, 9, 10, 11, 8

	add_update	m, ev, k2, 10, 11, 8, 9
	add_update	m, od, k2, 11, 8, 9, 10
	add_update	m, ev, k2, 8, 9, 10, 11
	add_update	m, od, k2, 9, 10, 11, 8
	add_update	m, ev, k3, 10, 11, 8, 9

	add_update	p, od, k3, 11, 8, 9, 10
	add_only	p, ev, k3, 9
	add_only	p, od, k3, 10
	add_only	p, ev, k3, 11
	add_only	p, od

	/* update state */
	add		dgbv.2s, dgbv.2s, dg1v.2s
	add		dgav.4s, dgav.4s, dg0v.4s

	cbz		w2, 2f
	cond_yield	3f, x5, x6
	b		0b

	/*
	 * Final block: add padding and total bit count.
	 * Skip if the input size was not a round multiple of the block size,
	 * the padding is handled by the C code in that case.
	 */
2:	cbz		x4, 3f
	ldr_l		w4, sha1_ce_offsetof_count, x4
	ldr		x4, [x0, x4]
	movi		v9.2d, #0
	mov		x8, #0x80000000
	movi		v10.2d, #0
	ror		x7, x4, #29		// ror(lsl(x4, 3), 32)
	fmov		d8, x8
	mov		x4, #0
	mov		v11.d[0], xzr
	mov		v11.d[1], x7
	b		1b

	/* store new state */
3:	st1		{dgav.4s}, [x0]
	str		dgb, [x0, #16]
	mov		w0, w2
	ret
SYM_FUNC_END(sha1_ce_transform)
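When sha1_ce_state::finalize is set, the code fabricates the final padding
block directly in v8-v11 (the fmov/ror/mov sequence after label 2). The same
64 bytes, built at byte level as a hedged C sketch (illustrative only; any
tail that is not block-aligned is padded by the C glue code instead):

#include <stdint.h>
#include <string.h>

/*
 * SHA-1 final block for a message that ended exactly on a 64-byte
 * boundary: one 0x80 byte, zeroes, then the big-endian bit count.
 */
static void sha1_final_block(uint64_t byte_count, uint8_t block[64])
{
        uint64_t bits = byte_count << 3;        /* the shift the ror #29 trick encodes */

        memset(block, 0, 64);
        block[0] = 0x80;
        for (int i = 0; i < 8; i++)
                block[56 + i] = (uint8_t)(bits >> (56 - 8 * i));
}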
aixcc-public/challenge-001-exemplar-source
15,473
arch/arm64/crypto/sm4-neon-core.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * SM4 Cipher Algorithm for ARMv8 NEON * as specified in * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html * * Copyright (C) 2022, Alibaba Group. * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ #include <linux/linkage.h> #include <asm/assembler.h> /* Register macros */ #define RTMP0 v8 #define RTMP1 v9 #define RTMP2 v10 #define RTMP3 v11 #define RX0 v12 #define RX1 v13 #define RKEY v14 #define RIV v15 /* Helper macros. */ #define PREPARE \ adr_l x5, crypto_sm4_sbox; \ ld1 {v16.16b-v19.16b}, [x5], #64; \ ld1 {v20.16b-v23.16b}, [x5], #64; \ ld1 {v24.16b-v27.16b}, [x5], #64; \ ld1 {v28.16b-v31.16b}, [x5]; #define transpose_4x4(s0, s1, s2, s3) \ zip1 RTMP0.4s, s0.4s, s1.4s; \ zip1 RTMP1.4s, s2.4s, s3.4s; \ zip2 RTMP2.4s, s0.4s, s1.4s; \ zip2 RTMP3.4s, s2.4s, s3.4s; \ zip1 s0.2d, RTMP0.2d, RTMP1.2d; \ zip2 s1.2d, RTMP0.2d, RTMP1.2d; \ zip1 s2.2d, RTMP2.2d, RTMP3.2d; \ zip2 s3.2d, RTMP2.2d, RTMP3.2d; #define rotate_clockwise_90(s0, s1, s2, s3) \ zip1 RTMP0.4s, s1.4s, s0.4s; \ zip2 RTMP1.4s, s1.4s, s0.4s; \ zip1 RTMP2.4s, s3.4s, s2.4s; \ zip2 RTMP3.4s, s3.4s, s2.4s; \ zip1 s0.2d, RTMP2.2d, RTMP0.2d; \ zip2 s1.2d, RTMP2.2d, RTMP0.2d; \ zip1 s2.2d, RTMP3.2d, RTMP1.2d; \ zip2 s3.2d, RTMP3.2d, RTMP1.2d; #define ROUND4(round, s0, s1, s2, s3) \ dup RX0.4s, RKEY.s[round]; \ /* rk ^ s1 ^ s2 ^ s3 */ \ eor RTMP1.16b, s2.16b, s3.16b; \ eor RX0.16b, RX0.16b, s1.16b; \ eor RX0.16b, RX0.16b, RTMP1.16b; \ \ /* sbox, non-linear part */ \ movi RTMP3.16b, #64; /* sizeof(sbox) / 4 */ \ tbl RTMP0.16b, {v16.16b-v19.16b}, RX0.16b; \ sub RX0.16b, RX0.16b, RTMP3.16b; \ tbx RTMP0.16b, {v20.16b-v23.16b}, RX0.16b; \ sub RX0.16b, RX0.16b, RTMP3.16b; \ tbx RTMP0.16b, {v24.16b-v27.16b}, RX0.16b; \ sub RX0.16b, RX0.16b, RTMP3.16b; \ tbx RTMP0.16b, {v28.16b-v31.16b}, RX0.16b; \ \ /* linear part */ \ shl RTMP1.4s, RTMP0.4s, #8; \ shl RTMP2.4s, RTMP0.4s, #16; \ shl RTMP3.4s, RTMP0.4s, #24; \ sri RTMP1.4s, RTMP0.4s, #(32-8); \ sri RTMP2.4s, RTMP0.4s, #(32-16); \ sri RTMP3.4s, RTMP0.4s, #(32-24); \ /* RTMP1 = x ^ rol32(x, 8) ^ rol32(x, 16) */ \ eor RTMP1.16b, RTMP1.16b, RTMP0.16b; \ eor RTMP1.16b, RTMP1.16b, RTMP2.16b; \ /* RTMP3 = x ^ rol32(x, 24) ^ rol32(RTMP1, 2) */ \ eor RTMP3.16b, RTMP3.16b, RTMP0.16b; \ shl RTMP2.4s, RTMP1.4s, 2; \ sri RTMP2.4s, RTMP1.4s, #(32-2); \ eor RTMP3.16b, RTMP3.16b, RTMP2.16b; \ /* s0 ^= RTMP3 */ \ eor s0.16b, s0.16b, RTMP3.16b; #define SM4_CRYPT_BLK4(b0, b1, b2, b3) \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ rev32 b3.16b, b3.16b; \ \ transpose_4x4(b0, b1, b2, b3); \ \ mov x6, 8; \ 4: \ ld1 {RKEY.4s}, [x0], #16; \ subs x6, x6, #1; \ \ ROUND4(0, b0, b1, b2, b3); \ ROUND4(1, b1, b2, b3, b0); \ ROUND4(2, b2, b3, b0, b1); \ ROUND4(3, b3, b0, b1, b2); \ \ bne 4b; \ \ rotate_clockwise_90(b0, b1, b2, b3); \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ rev32 b3.16b, b3.16b; \ \ /* repoint to rkey */ \ sub x0, x0, #128; #define ROUND8(round, s0, s1, s2, s3, t0, t1, t2, t3) \ /* rk ^ s1 ^ s2 ^ s3 */ \ dup RX0.4s, RKEY.s[round]; \ eor RTMP0.16b, s2.16b, s3.16b; \ mov RX1.16b, RX0.16b; \ eor RTMP1.16b, t2.16b, t3.16b; \ eor RX0.16b, RX0.16b, s1.16b; \ eor RX1.16b, RX1.16b, t1.16b; \ eor RX0.16b, RX0.16b, RTMP0.16b; \ eor RX1.16b, RX1.16b, RTMP1.16b; \ \ /* sbox, non-linear part */ \ movi RTMP3.16b, #64; /* sizeof(sbox) / 4 */ \ tbl RTMP0.16b, {v16.16b-v19.16b}, RX0.16b; \ tbl RTMP1.16b, {v16.16b-v19.16b}, RX1.16b; \ sub RX0.16b, RX0.16b, RTMP3.16b; \ sub RX1.16b, RX1.16b, 
RTMP3.16b; \ tbx RTMP0.16b, {v20.16b-v23.16b}, RX0.16b; \ tbx RTMP1.16b, {v20.16b-v23.16b}, RX1.16b; \ sub RX0.16b, RX0.16b, RTMP3.16b; \ sub RX1.16b, RX1.16b, RTMP3.16b; \ tbx RTMP0.16b, {v24.16b-v27.16b}, RX0.16b; \ tbx RTMP1.16b, {v24.16b-v27.16b}, RX1.16b; \ sub RX0.16b, RX0.16b, RTMP3.16b; \ sub RX1.16b, RX1.16b, RTMP3.16b; \ tbx RTMP0.16b, {v28.16b-v31.16b}, RX0.16b; \ tbx RTMP1.16b, {v28.16b-v31.16b}, RX1.16b; \ \ /* linear part */ \ shl RX0.4s, RTMP0.4s, #8; \ shl RX1.4s, RTMP1.4s, #8; \ shl RTMP2.4s, RTMP0.4s, #16; \ shl RTMP3.4s, RTMP1.4s, #16; \ sri RX0.4s, RTMP0.4s, #(32 - 8); \ sri RX1.4s, RTMP1.4s, #(32 - 8); \ sri RTMP2.4s, RTMP0.4s, #(32 - 16); \ sri RTMP3.4s, RTMP1.4s, #(32 - 16); \ /* RX = x ^ rol32(x, 8) ^ rol32(x, 16) */ \ eor RX0.16b, RX0.16b, RTMP0.16b; \ eor RX1.16b, RX1.16b, RTMP1.16b; \ eor RX0.16b, RX0.16b, RTMP2.16b; \ eor RX1.16b, RX1.16b, RTMP3.16b; \ /* RTMP0/1 ^= x ^ rol32(x, 24) ^ rol32(RX, 2) */ \ shl RTMP2.4s, RTMP0.4s, #24; \ shl RTMP3.4s, RTMP1.4s, #24; \ sri RTMP2.4s, RTMP0.4s, #(32 - 24); \ sri RTMP3.4s, RTMP1.4s, #(32 - 24); \ eor RTMP0.16b, RTMP0.16b, RTMP2.16b; \ eor RTMP1.16b, RTMP1.16b, RTMP3.16b; \ shl RTMP2.4s, RX0.4s, #2; \ shl RTMP3.4s, RX1.4s, #2; \ sri RTMP2.4s, RX0.4s, #(32 - 2); \ sri RTMP3.4s, RX1.4s, #(32 - 2); \ eor RTMP0.16b, RTMP0.16b, RTMP2.16b; \ eor RTMP1.16b, RTMP1.16b, RTMP3.16b; \ /* s0/t0 ^= RTMP0/1 */ \ eor s0.16b, s0.16b, RTMP0.16b; \ eor t0.16b, t0.16b, RTMP1.16b; #define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ rev32 b3.16b, b3.16b; \ rev32 b4.16b, b4.16b; \ rev32 b5.16b, b5.16b; \ rev32 b6.16b, b6.16b; \ rev32 b7.16b, b7.16b; \ \ transpose_4x4(b0, b1, b2, b3); \ transpose_4x4(b4, b5, b6, b7); \ \ mov x6, 8; \ 8: \ ld1 {RKEY.4s}, [x0], #16; \ subs x6, x6, #1; \ \ ROUND8(0, b0, b1, b2, b3, b4, b5, b6, b7); \ ROUND8(1, b1, b2, b3, b0, b5, b6, b7, b4); \ ROUND8(2, b2, b3, b0, b1, b6, b7, b4, b5); \ ROUND8(3, b3, b0, b1, b2, b7, b4, b5, b6); \ \ bne 8b; \ \ rotate_clockwise_90(b0, b1, b2, b3); \ rotate_clockwise_90(b4, b5, b6, b7); \ rev32 b0.16b, b0.16b; \ rev32 b1.16b, b1.16b; \ rev32 b2.16b, b2.16b; \ rev32 b3.16b, b3.16b; \ rev32 b4.16b, b4.16b; \ rev32 b5.16b, b5.16b; \ rev32 b6.16b, b6.16b; \ rev32 b7.16b, b7.16b; \ \ /* repoint to rkey */ \ sub x0, x0, #128; .align 3 SYM_FUNC_START_LOCAL(__sm4_neon_crypt_blk1_4) /* input: * x0: round key array, CTX * x1: dst * x2: src * w3: num blocks (1..4) */ PREPARE; ld1 {v0.16b}, [x2], #16; mov v1.16b, v0.16b; mov v2.16b, v0.16b; mov v3.16b, v0.16b; cmp w3, #2; blt .Lblk4_load_input_done; ld1 {v1.16b}, [x2], #16; beq .Lblk4_load_input_done; ld1 {v2.16b}, [x2], #16; cmp w3, #3; beq .Lblk4_load_input_done; ld1 {v3.16b}, [x2]; .Lblk4_load_input_done: SM4_CRYPT_BLK4(v0, v1, v2, v3); st1 {v0.16b}, [x1], #16; cmp w3, #2; blt .Lblk4_store_output_done; st1 {v1.16b}, [x1], #16; beq .Lblk4_store_output_done; st1 {v2.16b}, [x1], #16; cmp w3, #3; beq .Lblk4_store_output_done; st1 {v3.16b}, [x1]; .Lblk4_store_output_done: ret; SYM_FUNC_END(__sm4_neon_crypt_blk1_4) .align 3 SYM_FUNC_START(sm4_neon_crypt_blk1_8) /* input: * x0: round key array, CTX * x1: dst * x2: src * w3: num blocks (1..8) */ cmp w3, #5; blt __sm4_neon_crypt_blk1_4; PREPARE; ld1 {v0.16b-v3.16b}, [x2], #64; ld1 {v4.16b}, [x2], #16; mov v5.16b, v4.16b; mov v6.16b, v4.16b; mov v7.16b, v4.16b; beq .Lblk8_load_input_done; ld1 {v5.16b}, [x2], #16; cmp w3, #7; blt .Lblk8_load_input_done; ld1 {v6.16b}, [x2], #16; beq .Lblk8_load_input_done; ld1 {v7.16b}, 
[x2]; .Lblk8_load_input_done: SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); cmp w3, #6; st1 {v0.16b-v3.16b}, [x1], #64; st1 {v4.16b}, [x1], #16; blt .Lblk8_store_output_done; st1 {v5.16b}, [x1], #16; beq .Lblk8_store_output_done; st1 {v6.16b}, [x1], #16; cmp w3, #7; beq .Lblk8_store_output_done; st1 {v7.16b}, [x1]; .Lblk8_store_output_done: ret; SYM_FUNC_END(sm4_neon_crypt_blk1_8) .align 3 SYM_FUNC_START(sm4_neon_crypt_blk8) /* input: * x0: round key array, CTX * x1: dst * x2: src * w3: nblocks (multiples of 8) */ PREPARE; .Lcrypt_loop_blk: subs w3, w3, #8; bmi .Lcrypt_end; ld1 {v0.16b-v3.16b}, [x2], #64; ld1 {v4.16b-v7.16b}, [x2], #64; SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); st1 {v0.16b-v3.16b}, [x1], #64; st1 {v4.16b-v7.16b}, [x1], #64; b .Lcrypt_loop_blk; .Lcrypt_end: ret; SYM_FUNC_END(sm4_neon_crypt_blk8) .align 3 SYM_FUNC_START(sm4_neon_cbc_dec_blk8) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: iv (big endian, 128 bit) * w4: nblocks (multiples of 8) */ PREPARE; ld1 {RIV.16b}, [x3]; .Lcbc_loop_blk: subs w4, w4, #8; bmi .Lcbc_end; ld1 {v0.16b-v3.16b}, [x2], #64; ld1 {v4.16b-v7.16b}, [x2]; SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); sub x2, x2, #64; eor v0.16b, v0.16b, RIV.16b; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v1.16b, v1.16b, RTMP0.16b; eor v2.16b, v2.16b, RTMP1.16b; eor v3.16b, v3.16b, RTMP2.16b; st1 {v0.16b-v3.16b}, [x1], #64; eor v4.16b, v4.16b, RTMP3.16b; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v5.16b, v5.16b, RTMP0.16b; eor v6.16b, v6.16b, RTMP1.16b; eor v7.16b, v7.16b, RTMP2.16b; mov RIV.16b, RTMP3.16b; st1 {v4.16b-v7.16b}, [x1], #64; b .Lcbc_loop_blk; .Lcbc_end: /* store new IV */ st1 {RIV.16b}, [x3]; ret; SYM_FUNC_END(sm4_neon_cbc_dec_blk8) .align 3 SYM_FUNC_START(sm4_neon_cfb_dec_blk8) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: iv (big endian, 128 bit) * w4: nblocks (multiples of 8) */ PREPARE; ld1 {v0.16b}, [x3]; .Lcfb_loop_blk: subs w4, w4, #8; bmi .Lcfb_end; ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48; ld1 {v4.16b-v7.16b}, [x2]; SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); sub x2, x2, #48; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v0.16b, v0.16b, RTMP0.16b; eor v1.16b, v1.16b, RTMP1.16b; eor v2.16b, v2.16b, RTMP2.16b; eor v3.16b, v3.16b, RTMP3.16b; st1 {v0.16b-v3.16b}, [x1], #64; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v4.16b, v4.16b, RTMP0.16b; eor v5.16b, v5.16b, RTMP1.16b; eor v6.16b, v6.16b, RTMP2.16b; eor v7.16b, v7.16b, RTMP3.16b; st1 {v4.16b-v7.16b}, [x1], #64; mov v0.16b, RTMP3.16b; b .Lcfb_loop_blk; .Lcfb_end: /* store new IV */ st1 {v0.16b}, [x3]; ret; SYM_FUNC_END(sm4_neon_cfb_dec_blk8) .align 3 SYM_FUNC_START(sm4_neon_ctr_enc_blk8) /* input: * x0: round key array, CTX * x1: dst * x2: src * x3: ctr (big endian, 128 bit) * w4: nblocks (multiples of 8) */ PREPARE; ldp x7, x8, [x3]; rev x7, x7; rev x8, x8; .Lctr_loop_blk: subs w4, w4, #8; bmi .Lctr_end; #define inc_le128(vctr) \ mov vctr.d[1], x8; \ mov vctr.d[0], x7; \ adds x8, x8, #1; \ adc x7, x7, xzr; \ rev64 vctr.16b, vctr.16b; /* construct CTRs */ inc_le128(v0); /* +0 */ inc_le128(v1); /* +1 */ inc_le128(v2); /* +2 */ inc_le128(v3); /* +3 */ inc_le128(v4); /* +4 */ inc_le128(v5); /* +5 */ inc_le128(v6); /* +6 */ inc_le128(v7); /* +7 */ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v0.16b, v0.16b, RTMP0.16b; eor v1.16b, v1.16b, RTMP1.16b; eor v2.16b, v2.16b, RTMP2.16b; eor v3.16b, v3.16b, RTMP3.16b; st1 {v0.16b-v3.16b}, [x1], #64; ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; eor v4.16b, v4.16b, RTMP0.16b; eor 
v5.16b, v5.16b, RTMP1.16b; eor v6.16b, v6.16b, RTMP2.16b; eor v7.16b, v7.16b, RTMP3.16b; st1 {v4.16b-v7.16b}, [x1], #64; b .Lctr_loop_blk; .Lctr_end: /* store new CTR */ rev x7, x7; rev x8, x8; stp x7, x8, [x3]; ret; SYM_FUNC_END(sm4_neon_ctr_enc_blk8)
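The shl/sri "linear part" in ROUND4/ROUND8 above is SM4's L transform
computed with one rotation saved. A plain C sketch of both forms and the
identity that makes them equal (illustrative, not the kernel's code):

#include <stdint.h>

static uint32_t rol32(uint32_t x, unsigned int n)
{
        return (x << n) | (x >> (32 - n));
}

/* SM4 as specified: L(x) = x ^ x<<<2 ^ x<<<10 ^ x<<<18 ^ x<<<24 */
static uint32_t sm4_L_spec(uint32_t x)
{
        return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
}

/*
 * The NEON sequence: t = x ^ x<<<8 ^ x<<<16, then L(x) = x ^ x<<<24 ^ t<<<2,
 * since (x ^ x<<<8 ^ x<<<16)<<<2 = x<<<2 ^ x<<<10 ^ x<<<18.
 */
static uint32_t sm4_L_neon(uint32_t x)
{
        uint32_t t = x ^ rol32(x, 8) ^ rol32(x, 16);
        return x ^ rol32(x, 24) ^ rol32(t, 2);
}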
aixcc-public/challenge-001-exemplar-source
6,227
arch/arm64/crypto/sha3-ce-core.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * sha3-ce-core.S - core SHA-3 transform using v8.2 Crypto Extensions * * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 .set .Lv\b\().2d, \b .set .Lv\b\().16b, \b .endr /* * ARMv8.2 Crypto Extensions instructions */ .macro eor3, rd, rn, rm, ra .inst 0xce000000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) .endm .macro rax1, rd, rn, rm .inst 0xce608c00 | .L\rd | (.L\rn << 5) | (.L\rm << 16) .endm .macro bcax, rd, rn, rm, ra .inst 0xce200000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) .endm .macro xar, rd, rn, rm, imm6 .inst 0xce800000 | .L\rd | (.L\rn << 5) | ((\imm6) << 10) | (.L\rm << 16) .endm /* * int sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size) */ .text SYM_FUNC_START(sha3_ce_transform) /* load state */ add x8, x0, #32 ld1 { v0.1d- v3.1d}, [x0] ld1 { v4.1d- v7.1d}, [x8], #32 ld1 { v8.1d-v11.1d}, [x8], #32 ld1 {v12.1d-v15.1d}, [x8], #32 ld1 {v16.1d-v19.1d}, [x8], #32 ld1 {v20.1d-v23.1d}, [x8], #32 ld1 {v24.1d}, [x8] 0: sub w2, w2, #1 mov w8, #24 adr_l x9, .Lsha3_rcon /* load input */ ld1 {v25.8b-v28.8b}, [x1], #32 ld1 {v29.8b-v31.8b}, [x1], #24 eor v0.8b, v0.8b, v25.8b eor v1.8b, v1.8b, v26.8b eor v2.8b, v2.8b, v27.8b eor v3.8b, v3.8b, v28.8b eor v4.8b, v4.8b, v29.8b eor v5.8b, v5.8b, v30.8b eor v6.8b, v6.8b, v31.8b tbnz x3, #6, 2f // SHA3-512 ld1 {v25.8b-v28.8b}, [x1], #32 ld1 {v29.8b-v30.8b}, [x1], #16 eor v7.8b, v7.8b, v25.8b eor v8.8b, v8.8b, v26.8b eor v9.8b, v9.8b, v27.8b eor v10.8b, v10.8b, v28.8b eor v11.8b, v11.8b, v29.8b eor v12.8b, v12.8b, v30.8b tbnz x3, #4, 1f // SHA3-384 or SHA3-224 // SHA3-256 ld1 {v25.8b-v28.8b}, [x1], #32 eor v13.8b, v13.8b, v25.8b eor v14.8b, v14.8b, v26.8b eor v15.8b, v15.8b, v27.8b eor v16.8b, v16.8b, v28.8b b 3f 1: tbz x3, #2, 3f // bit 2 cleared? 
SHA-384 // SHA3-224 ld1 {v25.8b-v28.8b}, [x1], #32 ld1 {v29.8b}, [x1], #8 eor v13.8b, v13.8b, v25.8b eor v14.8b, v14.8b, v26.8b eor v15.8b, v15.8b, v27.8b eor v16.8b, v16.8b, v28.8b eor v17.8b, v17.8b, v29.8b b 3f // SHA3-512 2: ld1 {v25.8b-v26.8b}, [x1], #16 eor v7.8b, v7.8b, v25.8b eor v8.8b, v8.8b, v26.8b 3: sub w8, w8, #1 eor3 v29.16b, v4.16b, v9.16b, v14.16b eor3 v26.16b, v1.16b, v6.16b, v11.16b eor3 v28.16b, v3.16b, v8.16b, v13.16b eor3 v25.16b, v0.16b, v5.16b, v10.16b eor3 v27.16b, v2.16b, v7.16b, v12.16b eor3 v29.16b, v29.16b, v19.16b, v24.16b eor3 v26.16b, v26.16b, v16.16b, v21.16b eor3 v28.16b, v28.16b, v18.16b, v23.16b eor3 v25.16b, v25.16b, v15.16b, v20.16b eor3 v27.16b, v27.16b, v17.16b, v22.16b rax1 v30.2d, v29.2d, v26.2d // bc[0] rax1 v26.2d, v26.2d, v28.2d // bc[2] rax1 v28.2d, v28.2d, v25.2d // bc[4] rax1 v25.2d, v25.2d, v27.2d // bc[1] rax1 v27.2d, v27.2d, v29.2d // bc[3] eor v0.16b, v0.16b, v30.16b xar v29.2d, v1.2d, v25.2d, (64 - 1) xar v1.2d, v6.2d, v25.2d, (64 - 44) xar v6.2d, v9.2d, v28.2d, (64 - 20) xar v9.2d, v22.2d, v26.2d, (64 - 61) xar v22.2d, v14.2d, v28.2d, (64 - 39) xar v14.2d, v20.2d, v30.2d, (64 - 18) xar v31.2d, v2.2d, v26.2d, (64 - 62) xar v2.2d, v12.2d, v26.2d, (64 - 43) xar v12.2d, v13.2d, v27.2d, (64 - 25) xar v13.2d, v19.2d, v28.2d, (64 - 8) xar v19.2d, v23.2d, v27.2d, (64 - 56) xar v23.2d, v15.2d, v30.2d, (64 - 41) xar v15.2d, v4.2d, v28.2d, (64 - 27) xar v28.2d, v24.2d, v28.2d, (64 - 14) xar v24.2d, v21.2d, v25.2d, (64 - 2) xar v8.2d, v8.2d, v27.2d, (64 - 55) xar v4.2d, v16.2d, v25.2d, (64 - 45) xar v16.2d, v5.2d, v30.2d, (64 - 36) xar v5.2d, v3.2d, v27.2d, (64 - 28) xar v27.2d, v18.2d, v27.2d, (64 - 21) xar v3.2d, v17.2d, v26.2d, (64 - 15) xar v25.2d, v11.2d, v25.2d, (64 - 10) xar v26.2d, v7.2d, v26.2d, (64 - 6) xar v30.2d, v10.2d, v30.2d, (64 - 3) bcax v20.16b, v31.16b, v22.16b, v8.16b bcax v21.16b, v8.16b, v23.16b, v22.16b bcax v22.16b, v22.16b, v24.16b, v23.16b bcax v23.16b, v23.16b, v31.16b, v24.16b bcax v24.16b, v24.16b, v8.16b, v31.16b ld1r {v31.2d}, [x9], #8 bcax v17.16b, v25.16b, v19.16b, v3.16b bcax v18.16b, v3.16b, v15.16b, v19.16b bcax v19.16b, v19.16b, v16.16b, v15.16b bcax v15.16b, v15.16b, v25.16b, v16.16b bcax v16.16b, v16.16b, v3.16b, v25.16b bcax v10.16b, v29.16b, v12.16b, v26.16b bcax v11.16b, v26.16b, v13.16b, v12.16b bcax v12.16b, v12.16b, v14.16b, v13.16b bcax v13.16b, v13.16b, v29.16b, v14.16b bcax v14.16b, v14.16b, v26.16b, v29.16b bcax v7.16b, v30.16b, v9.16b, v4.16b bcax v8.16b, v4.16b, v5.16b, v9.16b bcax v9.16b, v9.16b, v6.16b, v5.16b bcax v5.16b, v5.16b, v30.16b, v6.16b bcax v6.16b, v6.16b, v4.16b, v30.16b bcax v3.16b, v27.16b, v0.16b, v28.16b bcax v4.16b, v28.16b, v1.16b, v0.16b bcax v0.16b, v0.16b, v2.16b, v1.16b bcax v1.16b, v1.16b, v27.16b, v2.16b bcax v2.16b, v2.16b, v28.16b, v27.16b eor v0.16b, v0.16b, v31.16b cbnz w8, 3b cond_yield 4f, x8, x9 cbnz w2, 0b /* save state */ 4: st1 { v0.1d- v3.1d}, [x0], #32 st1 { v4.1d- v7.1d}, [x0], #32 st1 { v8.1d-v11.1d}, [x0], #32 st1 {v12.1d-v15.1d}, [x0], #32 st1 {v16.1d-v19.1d}, [x0], #32 st1 {v20.1d-v23.1d}, [x0], #32 st1 {v24.1d}, [x0] mov w0, w2 ret SYM_FUNC_END(sha3_ce_transform) .section ".rodata", "a" .align 8 .Lsha3_rcon: .quad 0x0000000000000001, 0x0000000000008082, 0x800000000000808a .quad 0x8000000080008000, 0x000000000000808b, 0x0000000080000001 .quad 0x8000000080008081, 0x8000000000008009, 0x000000000000008a .quad 0x0000000000000088, 0x0000000080008009, 0x000000008000000a .quad 0x000000008000808b, 0x800000000000008b, 0x8000000000008089 .quad 0x8000000000008003, 
0x8000000000008002, 0x8000000000000080 .quad 0x000000000000800a, 0x800000008000000a, 0x8000000080008081 .quad 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
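The dg_size bit tests in sha3_ce_transform (tbnz x3, #6 / tbnz x3, #4 /
tbz x3, #2) only select how many 8-byte lanes of input to absorb per block;
the counts follow from the sponge rate. A one-line C sanity check (a sketch,
not kernel code):

/* For a digest of dg_size bytes, the Keccak rate is 200 - 2*dg_size bytes. */
static int sha3_rate_lanes(int dg_size)
{
        return (200 - 2 * dg_size) / 8; /* 224->18, 256->17, 384->13, 512->9 */
}

That matches the ladder above: 7 lanes are always absorbed, SHA3-512 adds 2,
SHA3-384 adds 6, SHA3-256 adds 10, and SHA3-224 adds 11.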
aixcc-public/challenge-001-exemplar-source
5,964
arch/arm64/crypto/aes-ce-ccm-core.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions * * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> */ #include <linux/linkage.h> #include <asm/assembler.h> .text .arch armv8-a+crypto /* * u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes, * u32 macp, u8 const rk[], u32 rounds); */ SYM_FUNC_START(ce_aes_ccm_auth_data) ld1 {v0.16b}, [x0] /* load mac */ cbz w3, 1f sub w3, w3, #16 eor v1.16b, v1.16b, v1.16b 0: ldrb w7, [x1], #1 /* get 1 byte of input */ subs w2, w2, #1 add w3, w3, #1 ins v1.b[0], w7 ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */ beq 8f /* out of input? */ cbnz w3, 0b eor v0.16b, v0.16b, v1.16b 1: ld1 {v3.4s}, [x4] /* load first round key */ prfm pldl1strm, [x1] cmp w5, #12 /* which key size? */ add x6, x4, #16 sub w7, w5, #2 /* modified # of rounds */ bmi 2f bne 5f mov v5.16b, v3.16b b 4f 2: mov v4.16b, v3.16b ld1 {v5.4s}, [x6], #16 /* load 2nd round key */ 3: aese v0.16b, v4.16b aesmc v0.16b, v0.16b 4: ld1 {v3.4s}, [x6], #16 /* load next round key */ aese v0.16b, v5.16b aesmc v0.16b, v0.16b 5: ld1 {v4.4s}, [x6], #16 /* load next round key */ subs w7, w7, #3 aese v0.16b, v3.16b aesmc v0.16b, v0.16b ld1 {v5.4s}, [x6], #16 /* load next round key */ bpl 3b aese v0.16b, v4.16b subs w2, w2, #16 /* last data? */ eor v0.16b, v0.16b, v5.16b /* final round */ bmi 6f ld1 {v1.16b}, [x1], #16 /* load next input block */ eor v0.16b, v0.16b, v1.16b /* xor with mac */ bne 1b 6: st1 {v0.16b}, [x0] /* store mac */ beq 10f adds w2, w2, #16 beq 10f mov w3, w2 7: ldrb w7, [x1], #1 umov w6, v0.b[0] eor w6, w6, w7 strb w6, [x0], #1 subs w2, w2, #1 beq 10f ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ b 7b 8: cbz w3, 91f mov w7, w3 add w3, w3, #16 9: ext v1.16b, v1.16b, v1.16b, #1 adds w7, w7, #1 bne 9b 91: eor v0.16b, v0.16b, v1.16b st1 {v0.16b}, [x0] 10: mov w0, w3 ret SYM_FUNC_END(ce_aes_ccm_auth_data) /* * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[], * u32 rounds); */ SYM_FUNC_START(ce_aes_ccm_final) ld1 {v3.4s}, [x2], #16 /* load first round key */ ld1 {v0.16b}, [x0] /* load mac */ cmp w3, #12 /* which key size? */ sub w3, w3, #2 /* modified # of rounds */ ld1 {v1.16b}, [x1] /* load 1st ctriv */ bmi 0f bne 3f mov v5.16b, v3.16b b 2f 0: mov v4.16b, v3.16b 1: ld1 {v5.4s}, [x2], #16 /* load next round key */ aese v0.16b, v4.16b aesmc v0.16b, v0.16b aese v1.16b, v4.16b aesmc v1.16b, v1.16b 2: ld1 {v3.4s}, [x2], #16 /* load next round key */ aese v0.16b, v5.16b aesmc v0.16b, v0.16b aese v1.16b, v5.16b aesmc v1.16b, v1.16b 3: ld1 {v4.4s}, [x2], #16 /* load next round key */ subs w3, w3, #3 aese v0.16b, v3.16b aesmc v0.16b, v0.16b aese v1.16b, v3.16b aesmc v1.16b, v1.16b bpl 1b aese v0.16b, v4.16b aese v1.16b, v4.16b /* final round key cancels out */ eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */ st1 {v0.16b}, [x0] /* store result */ ret SYM_FUNC_END(ce_aes_ccm_final) .macro aes_ccm_do_crypt,enc cbz x2, 5f ldr x8, [x6, #8] /* load lower ctr */ ld1 {v0.16b}, [x5] /* load mac */ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ 0: /* outer loop */ ld1 {v1.8b}, [x6] /* load upper ctr */ prfm pldl1strm, [x1] add x8, x8, #1 rev x9, x8 cmp w4, #12 /* which key size? 
*/ sub w7, w4, #2 /* get modified # of rounds */ ins v1.d[1], x9 /* no carry in lower ctr */ ld1 {v3.4s}, [x3] /* load first round key */ add x10, x3, #16 bmi 1f bne 4f mov v5.16b, v3.16b b 3f 1: mov v4.16b, v3.16b ld1 {v5.4s}, [x10], #16 /* load 2nd round key */ 2: /* inner loop: 3 rounds, 2x interleaved */ aese v0.16b, v4.16b aesmc v0.16b, v0.16b aese v1.16b, v4.16b aesmc v1.16b, v1.16b 3: ld1 {v3.4s}, [x10], #16 /* load next round key */ aese v0.16b, v5.16b aesmc v0.16b, v0.16b aese v1.16b, v5.16b aesmc v1.16b, v1.16b 4: ld1 {v4.4s}, [x10], #16 /* load next round key */ subs w7, w7, #3 aese v0.16b, v3.16b aesmc v0.16b, v0.16b aese v1.16b, v3.16b aesmc v1.16b, v1.16b ld1 {v5.4s}, [x10], #16 /* load next round key */ bpl 2b aese v0.16b, v4.16b aese v1.16b, v4.16b subs w2, w2, #16 bmi 6f /* partial block? */ ld1 {v2.16b}, [x1], #16 /* load next input block */ .if \enc == 1 eor v2.16b, v2.16b, v5.16b /* final round enc+mac */ eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */ .else eor v2.16b, v2.16b, v1.16b /* xor with crypted ctr */ eor v1.16b, v2.16b, v5.16b /* final round enc */ .endif eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */ st1 {v1.16b}, [x0], #16 /* write output block */ bne 0b CPU_LE( rev x8, x8 ) st1 {v0.16b}, [x5] /* store mac */ str x8, [x6, #8] /* store lsb end of ctr (BE) */ 5: ret 6: eor v0.16b, v0.16b, v5.16b /* final round mac */ eor v1.16b, v1.16b, v5.16b /* final round enc */ st1 {v0.16b}, [x5] /* store mac */ add w2, w2, #16 /* process partial tail block */ 7: ldrb w9, [x1], #1 /* get 1 byte of input */ umov w6, v1.b[0] /* get top crypted ctr byte */ umov w7, v0.b[0] /* get top mac byte */ .if \enc == 1 eor w7, w7, w9 eor w9, w9, w6 .else eor w9, w9, w6 eor w7, w7, w9 .endif strb w9, [x0], #1 /* store out byte */ strb w7, [x5], #1 /* store mac byte */ subs w2, w2, #1 beq 5b ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */ ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */ b 7b .endm /* * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes, * u8 const rk[], u32 rounds, u8 mac[], * u8 ctr[]); * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes, * u8 const rk[], u32 rounds, u8 mac[], * u8 ctr[]); */ SYM_FUNC_START(ce_aes_ccm_encrypt) aes_ccm_do_crypt 1 SYM_FUNC_END(ce_aes_ccm_encrypt) SYM_FUNC_START(ce_aes_ccm_decrypt) aes_ccm_do_crypt 0 SYM_FUNC_END(ce_aes_ccm_decrypt)
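ce_aes_ccm_auth_data folds associated data into the CCM MAC as a plain
CBC-MAC. A simplified C model of that shape (aes_encrypt() is a hypothetical
stand-in for the aese/aesmc round loop, and the macp resume offset handled by
the real code is ignored here):

#include <stddef.h>
#include <stdint.h>

extern void aes_encrypt(uint8_t block[16], const uint8_t *rk, int rounds);

/* CBC-MAC accumulation: mac = AES(mac ^ block) per full 16-byte block. */
static void ccm_auth(uint8_t mac[16], const uint8_t *in, size_t len,
                     const uint8_t *rk, int rounds)
{
        while (len >= 16) {
                for (int i = 0; i < 16; i++)
                        mac[i] ^= in[i];
                aes_encrypt(mac, rk, rounds);
                in += 16;
                len -= 16;
        }
        for (size_t i = 0; i < len; i++)
                mac[i] ^= in[i];        /* leftover bytes wait for more data */
}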
aixcc-public/challenge-001-exemplar-source
21,814
arch/arm64/crypto/aes-modes.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES * * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> */ /* included by aes-ce.S and aes-neon.S */ .text .align 4 #ifndef MAX_STRIDE #define MAX_STRIDE 4 #endif #if MAX_STRIDE == 4 #define ST4(x...) x #define ST5(x...) #else #define ST4(x...) #define ST5(x...) x #endif SYM_FUNC_START_LOCAL(aes_encrypt_block4x) encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 ret SYM_FUNC_END(aes_encrypt_block4x) SYM_FUNC_START_LOCAL(aes_decrypt_block4x) decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 ret SYM_FUNC_END(aes_decrypt_block4x) #if MAX_STRIDE == 5 SYM_FUNC_START_LOCAL(aes_encrypt_block5x) encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7 ret SYM_FUNC_END(aes_encrypt_block5x) SYM_FUNC_START_LOCAL(aes_decrypt_block5x) decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7 ret SYM_FUNC_END(aes_decrypt_block5x) #endif /* * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks) * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks) */ AES_FUNC_START(aes_ecb_encrypt) stp x29, x30, [sp, #-16]! mov x29, sp enc_prepare w3, x2, x5 .LecbencloopNx: subs w4, w4, #MAX_STRIDE bmi .Lecbenc1x ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ ST4( bl aes_encrypt_block4x ) ST5( ld1 {v4.16b}, [x1], #16 ) ST5( bl aes_encrypt_block5x ) st1 {v0.16b-v3.16b}, [x0], #64 ST5( st1 {v4.16b}, [x0], #16 ) b .LecbencloopNx .Lecbenc1x: adds w4, w4, #MAX_STRIDE beq .Lecbencout .Lecbencloop: ld1 {v0.16b}, [x1], #16 /* get next pt block */ encrypt_block v0, w3, x2, x5, w6 st1 {v0.16b}, [x0], #16 subs w4, w4, #1 bne .Lecbencloop .Lecbencout: ldp x29, x30, [sp], #16 ret AES_FUNC_END(aes_ecb_encrypt) AES_FUNC_START(aes_ecb_decrypt) stp x29, x30, [sp, #-16]! 
mov x29, sp dec_prepare w3, x2, x5 .LecbdecloopNx: subs w4, w4, #MAX_STRIDE bmi .Lecbdec1x ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ ST4( bl aes_decrypt_block4x ) ST5( ld1 {v4.16b}, [x1], #16 ) ST5( bl aes_decrypt_block5x ) st1 {v0.16b-v3.16b}, [x0], #64 ST5( st1 {v4.16b}, [x0], #16 ) b .LecbdecloopNx .Lecbdec1x: adds w4, w4, #MAX_STRIDE beq .Lecbdecout .Lecbdecloop: ld1 {v0.16b}, [x1], #16 /* get next ct block */ decrypt_block v0, w3, x2, x5, w6 st1 {v0.16b}, [x0], #16 subs w4, w4, #1 bne .Lecbdecloop .Lecbdecout: ldp x29, x30, [sp], #16 ret AES_FUNC_END(aes_ecb_decrypt) /* * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 iv[]) * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 iv[]) * aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[], * int rounds, int blocks, u8 iv[], * u32 const rk2[]); * aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[], * int rounds, int blocks, u8 iv[], * u32 const rk2[]); */ AES_FUNC_START(aes_essiv_cbc_encrypt) ld1 {v4.16b}, [x5] /* get iv */ mov w8, #14 /* AES-256: 14 rounds */ enc_prepare w8, x6, x7 encrypt_block v4, w8, x6, x7, w9 enc_switch_key w3, x2, x6 b .Lcbcencloop4x AES_FUNC_START(aes_cbc_encrypt) ld1 {v4.16b}, [x5] /* get iv */ enc_prepare w3, x2, x6 .Lcbcencloop4x: subs w4, w4, #4 bmi .Lcbcenc1x ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */ encrypt_block v0, w3, x2, x6, w7 eor v1.16b, v1.16b, v0.16b encrypt_block v1, w3, x2, x6, w7 eor v2.16b, v2.16b, v1.16b encrypt_block v2, w3, x2, x6, w7 eor v3.16b, v3.16b, v2.16b encrypt_block v3, w3, x2, x6, w7 st1 {v0.16b-v3.16b}, [x0], #64 mov v4.16b, v3.16b b .Lcbcencloop4x .Lcbcenc1x: adds w4, w4, #4 beq .Lcbcencout .Lcbcencloop: ld1 {v0.16b}, [x1], #16 /* get next pt block */ eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */ encrypt_block v4, w3, x2, x6, w7 st1 {v4.16b}, [x0], #16 subs w4, w4, #1 bne .Lcbcencloop .Lcbcencout: st1 {v4.16b}, [x5] /* return iv */ ret AES_FUNC_END(aes_cbc_encrypt) AES_FUNC_END(aes_essiv_cbc_encrypt) AES_FUNC_START(aes_essiv_cbc_decrypt) stp x29, x30, [sp, #-16]! mov x29, sp ld1 {cbciv.16b}, [x5] /* get iv */ mov w8, #14 /* AES-256: 14 rounds */ enc_prepare w8, x6, x7 encrypt_block cbciv, w8, x6, x7, w9 b .Lessivcbcdecstart AES_FUNC_START(aes_cbc_decrypt) stp x29, x30, [sp, #-16]! 
mov x29, sp ld1 {cbciv.16b}, [x5] /* get iv */ .Lessivcbcdecstart: dec_prepare w3, x2, x6 .LcbcdecloopNx: subs w4, w4, #MAX_STRIDE bmi .Lcbcdec1x ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ #if MAX_STRIDE == 5 ld1 {v4.16b}, [x1], #16 /* get 1 ct block */ mov v5.16b, v0.16b mov v6.16b, v1.16b mov v7.16b, v2.16b bl aes_decrypt_block5x sub x1, x1, #32 eor v0.16b, v0.16b, cbciv.16b eor v1.16b, v1.16b, v5.16b ld1 {v5.16b}, [x1], #16 /* reload 1 ct block */ ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */ eor v2.16b, v2.16b, v6.16b eor v3.16b, v3.16b, v7.16b eor v4.16b, v4.16b, v5.16b #else mov v4.16b, v0.16b mov v5.16b, v1.16b mov v6.16b, v2.16b bl aes_decrypt_block4x sub x1, x1, #16 eor v0.16b, v0.16b, cbciv.16b eor v1.16b, v1.16b, v4.16b ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */ eor v2.16b, v2.16b, v5.16b eor v3.16b, v3.16b, v6.16b #endif st1 {v0.16b-v3.16b}, [x0], #64 ST5( st1 {v4.16b}, [x0], #16 ) b .LcbcdecloopNx .Lcbcdec1x: adds w4, w4, #MAX_STRIDE beq .Lcbcdecout .Lcbcdecloop: ld1 {v1.16b}, [x1], #16 /* get next ct block */ mov v0.16b, v1.16b /* ...and copy to v0 */ decrypt_block v0, w3, x2, x6, w7 eor v0.16b, v0.16b, cbciv.16b /* xor with iv => pt */ mov cbciv.16b, v1.16b /* ct is next iv */ st1 {v0.16b}, [x0], #16 subs w4, w4, #1 bne .Lcbcdecloop .Lcbcdecout: st1 {cbciv.16b}, [x5] /* return iv */ ldp x29, x30, [sp], #16 ret AES_FUNC_END(aes_cbc_decrypt) AES_FUNC_END(aes_essiv_cbc_decrypt) /* * aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[], * int rounds, int bytes, u8 const iv[]) * aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[], * int rounds, int bytes, u8 const iv[]) */ AES_FUNC_START(aes_cbc_cts_encrypt) adr_l x8, .Lcts_permute_table sub x4, x4, #16 add x9, x8, #32 add x8, x8, x4 sub x9, x9, x4 ld1 {v3.16b}, [x8] ld1 {v4.16b}, [x9] ld1 {v0.16b}, [x1], x4 /* overlapping loads */ ld1 {v1.16b}, [x1] ld1 {v5.16b}, [x5] /* get iv */ enc_prepare w3, x2, x6 eor v0.16b, v0.16b, v5.16b /* xor with iv */ tbl v1.16b, {v1.16b}, v4.16b encrypt_block v0, w3, x2, x6, w7 eor v1.16b, v1.16b, v0.16b tbl v0.16b, {v0.16b}, v3.16b encrypt_block v1, w3, x2, x6, w7 add x4, x0, x4 st1 {v0.16b}, [x4] /* overlapping stores */ st1 {v1.16b}, [x0] ret AES_FUNC_END(aes_cbc_cts_encrypt) AES_FUNC_START(aes_cbc_cts_decrypt) adr_l x8, .Lcts_permute_table sub x4, x4, #16 add x9, x8, #32 add x8, x8, x4 sub x9, x9, x4 ld1 {v3.16b}, [x8] ld1 {v4.16b}, [x9] ld1 {v0.16b}, [x1], x4 /* overlapping loads */ ld1 {v1.16b}, [x1] ld1 {v5.16b}, [x5] /* get iv */ dec_prepare w3, x2, x6 decrypt_block v0, w3, x2, x6, w7 tbl v2.16b, {v0.16b}, v3.16b eor v2.16b, v2.16b, v1.16b tbx v0.16b, {v1.16b}, v4.16b decrypt_block v0, w3, x2, x6, w7 eor v0.16b, v0.16b, v5.16b /* xor with iv */ add x4, x0, x4 st1 {v2.16b}, [x4] /* overlapping stores */ st1 {v0.16b}, [x0] ret AES_FUNC_END(aes_cbc_cts_decrypt) .section ".rodata", "a" .align 6 .Lcts_permute_table: .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .previous /* * This macro generates the code for CTR and XCTR mode. 
*/ .macro ctr_encrypt xctr // Arguments OUT .req x0 IN .req x1 KEY .req x2 ROUNDS_W .req w3 BYTES_W .req w4 IV .req x5 BYTE_CTR_W .req w6 // XCTR only // Intermediate values CTR_W .req w11 // XCTR only CTR .req x11 // XCTR only IV_PART .req x12 BLOCKS .req x13 BLOCKS_W .req w13 stp x29, x30, [sp, #-16]! mov x29, sp enc_prepare ROUNDS_W, KEY, IV_PART ld1 {vctr.16b}, [IV] /* * Keep 64 bits of the IV in a register. For CTR mode this lets us * easily increment the IV. For XCTR mode this lets us efficiently XOR * the 64-bit counter with the IV. */ .if \xctr umov IV_PART, vctr.d[0] lsr CTR_W, BYTE_CTR_W, #4 .else umov IV_PART, vctr.d[1] rev IV_PART, IV_PART .endif .LctrloopNx\xctr: add BLOCKS_W, BYTES_W, #15 sub BYTES_W, BYTES_W, #MAX_STRIDE << 4 lsr BLOCKS_W, BLOCKS_W, #4 mov w8, #MAX_STRIDE cmp BLOCKS_W, w8 csel BLOCKS_W, BLOCKS_W, w8, lt /* * Set up the counter values in v0-v{MAX_STRIDE-1}. * * If we are encrypting less than MAX_STRIDE blocks, the tail block * handling code expects the last keystream block to be in * v{MAX_STRIDE-1}. For example: if encrypting two blocks with * MAX_STRIDE=5, then v3 and v4 should have the next two counter blocks. */ .if \xctr add CTR, CTR, BLOCKS .else adds IV_PART, IV_PART, BLOCKS .endif mov v0.16b, vctr.16b mov v1.16b, vctr.16b mov v2.16b, vctr.16b mov v3.16b, vctr.16b ST5( mov v4.16b, vctr.16b ) .if \xctr sub x6, CTR, #MAX_STRIDE - 1 sub x7, CTR, #MAX_STRIDE - 2 sub x8, CTR, #MAX_STRIDE - 3 sub x9, CTR, #MAX_STRIDE - 4 ST5( sub x10, CTR, #MAX_STRIDE - 5 ) eor x6, x6, IV_PART eor x7, x7, IV_PART eor x8, x8, IV_PART eor x9, x9, IV_PART ST5( eor x10, x10, IV_PART ) mov v0.d[0], x6 mov v1.d[0], x7 mov v2.d[0], x8 mov v3.d[0], x9 ST5( mov v4.d[0], x10 ) .else bcs 0f .subsection 1 /* * This subsection handles carries. * * Conditional branching here is allowed with respect to time * invariance since the branches are dependent on the IV instead * of the plaintext or key. This code is rarely executed in * practice anyway. */ /* Apply carry to outgoing counter. */ 0: umov x8, vctr.d[0] rev x8, x8 add x8, x8, #1 rev x8, x8 ins vctr.d[0], x8 /* * Apply carry to counter blocks if needed. * * Since the carry flag was set, we know 0 <= IV_PART < * MAX_STRIDE. Using the value of IV_PART we can determine how * many counter blocks need to be updated. */ cbz IV_PART, 2f adr x16, 1f sub x16, x16, IV_PART, lsl #3 br x16 bti c mov v0.d[0], vctr.d[0] bti c mov v1.d[0], vctr.d[0] bti c mov v2.d[0], vctr.d[0] bti c mov v3.d[0], vctr.d[0] ST5( bti c ) ST5( mov v4.d[0], vctr.d[0] ) 1: b 2f .previous 2: rev x7, IV_PART ins vctr.d[1], x7 sub x7, IV_PART, #MAX_STRIDE - 1 sub x8, IV_PART, #MAX_STRIDE - 2 sub x9, IV_PART, #MAX_STRIDE - 3 rev x7, x7 rev x8, x8 mov v1.d[1], x7 rev x9, x9 ST5( sub x10, IV_PART, #MAX_STRIDE - 4 ) mov v2.d[1], x8 ST5( rev x10, x10 ) mov v3.d[1], x9 ST5( mov v4.d[1], x10 ) .endif /* * If there are at least MAX_STRIDE blocks left, XOR the data with * keystream and store. Otherwise jump to tail handling. 
*/ tbnz BYTES_W, #31, .Lctrtail\xctr ld1 {v5.16b-v7.16b}, [IN], #48 ST4( bl aes_encrypt_block4x ) ST5( bl aes_encrypt_block5x ) eor v0.16b, v5.16b, v0.16b ST4( ld1 {v5.16b}, [IN], #16 ) eor v1.16b, v6.16b, v1.16b ST5( ld1 {v5.16b-v6.16b}, [IN], #32 ) eor v2.16b, v7.16b, v2.16b eor v3.16b, v5.16b, v3.16b ST5( eor v4.16b, v6.16b, v4.16b ) st1 {v0.16b-v3.16b}, [OUT], #64 ST5( st1 {v4.16b}, [OUT], #16 ) cbz BYTES_W, .Lctrout\xctr b .LctrloopNx\xctr .Lctrout\xctr: .if !\xctr st1 {vctr.16b}, [IV] /* return next CTR value */ .endif ldp x29, x30, [sp], #16 ret .Lctrtail\xctr: /* * Handle up to MAX_STRIDE * 16 - 1 bytes of plaintext * * This code expects the last keystream block to be in v{MAX_STRIDE-1}. * For example: if encrypting two blocks with MAX_STRIDE=5, then v3 and * v4 should have the next two counter blocks. * * This allows us to store the ciphertext by writing to overlapping * regions of memory. Any invalid ciphertext blocks get overwritten by * correctly computed blocks. This approach greatly simplifies the * logic for storing the ciphertext. */ mov x16, #16 ands w7, BYTES_W, #0xf csel x13, x7, x16, ne ST5( cmp BYTES_W, #64 - (MAX_STRIDE << 4)) ST5( csel x14, x16, xzr, gt ) cmp BYTES_W, #48 - (MAX_STRIDE << 4) csel x15, x16, xzr, gt cmp BYTES_W, #32 - (MAX_STRIDE << 4) csel x16, x16, xzr, gt cmp BYTES_W, #16 - (MAX_STRIDE << 4) adr_l x9, .Lcts_permute_table add x9, x9, x13 ble .Lctrtail1x\xctr ST5( ld1 {v5.16b}, [IN], x14 ) ld1 {v6.16b}, [IN], x15 ld1 {v7.16b}, [IN], x16 ST4( bl aes_encrypt_block4x ) ST5( bl aes_encrypt_block5x ) ld1 {v8.16b}, [IN], x13 ld1 {v9.16b}, [IN] ld1 {v10.16b}, [x9] ST4( eor v6.16b, v6.16b, v0.16b ) ST4( eor v7.16b, v7.16b, v1.16b ) ST4( tbl v3.16b, {v3.16b}, v10.16b ) ST4( eor v8.16b, v8.16b, v2.16b ) ST4( eor v9.16b, v9.16b, v3.16b ) ST5( eor v5.16b, v5.16b, v0.16b ) ST5( eor v6.16b, v6.16b, v1.16b ) ST5( tbl v4.16b, {v4.16b}, v10.16b ) ST5( eor v7.16b, v7.16b, v2.16b ) ST5( eor v8.16b, v8.16b, v3.16b ) ST5( eor v9.16b, v9.16b, v4.16b ) ST5( st1 {v5.16b}, [OUT], x14 ) st1 {v6.16b}, [OUT], x15 st1 {v7.16b}, [OUT], x16 add x13, x13, OUT st1 {v9.16b}, [x13] // overlapping stores st1 {v8.16b}, [OUT] b .Lctrout\xctr .Lctrtail1x\xctr: /* * Handle <= 16 bytes of plaintext * * This code always reads and writes 16 bytes. To avoid out of bounds * accesses, XCTR and CTR modes must use a temporary buffer when * encrypting/decrypting less than 16 bytes. * * This code is unusual in that it loads the input and stores the output * relative to the end of the buffers rather than relative to the start. * This causes unusual behaviour when encrypting/decrypting less than 16 * bytes; the end of the data is expected to be at the end of the * temporary buffer rather than the start of the data being at the start * of the temporary buffer. 
*/ sub x8, x7, #16 csel x7, x7, x8, eq add IN, IN, x7 add OUT, OUT, x7 ld1 {v5.16b}, [IN] ld1 {v6.16b}, [OUT] ST5( mov v3.16b, v4.16b ) encrypt_block v3, ROUNDS_W, KEY, x8, w7 ld1 {v10.16b-v11.16b}, [x9] tbl v3.16b, {v3.16b}, v10.16b sshr v11.16b, v11.16b, #7 eor v5.16b, v5.16b, v3.16b bif v5.16b, v6.16b, v11.16b st1 {v5.16b}, [OUT] b .Lctrout\xctr // Arguments .unreq OUT .unreq IN .unreq KEY .unreq ROUNDS_W .unreq BYTES_W .unreq IV .unreq BYTE_CTR_W // XCTR only // Intermediate values .unreq CTR_W // XCTR only .unreq CTR // XCTR only .unreq IV_PART .unreq BLOCKS .unreq BLOCKS_W .endm /* * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int bytes, u8 ctr[]) * * The input and output buffers must always be at least 16 bytes even if * encrypting/decrypting less than 16 bytes. Otherwise out of bounds * accesses will occur. The data to be encrypted/decrypted is expected * to be at the end of this 16-byte temporary buffer rather than the * start. */ AES_FUNC_START(aes_ctr_encrypt) ctr_encrypt 0 AES_FUNC_END(aes_ctr_encrypt) /* * aes_xctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int bytes, u8 const iv[], int byte_ctr) * * The input and output buffers must always be at least 16 bytes even if * encrypting/decrypting less than 16 bytes. Otherwise out of bounds * accesses will occur. The data to be encrypted/decrypted is expected * to be at the end of this 16-byte temporary buffer rather than the * start. */ AES_FUNC_START(aes_xctr_encrypt) ctr_encrypt 1 AES_FUNC_END(aes_xctr_encrypt) /* * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds, * int bytes, u8 const rk2[], u8 iv[], int first) * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds, * int bytes, u8 const rk2[], u8 iv[], int first) */ .macro next_tweak, out, in, tmp sshr \tmp\().2d, \in\().2d, #63 and \tmp\().16b, \tmp\().16b, xtsmask.16b add \out\().2d, \in\().2d, \in\().2d ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 eor \out\().16b, \out\().16b, \tmp\().16b .endm .macro xts_load_mask, tmp movi xtsmask.2s, #0x1 movi \tmp\().2s, #0x87 uzp1 xtsmask.4s, xtsmask.4s, \tmp\().4s .endm AES_FUNC_START(aes_xts_encrypt) stp x29, x30, [sp, #-16]! 
mov x29, sp ld1 {v4.16b}, [x6] xts_load_mask v8 cbz w7, .Lxtsencnotfirst enc_prepare w3, x5, x8 xts_cts_skip_tw w7, .LxtsencNx encrypt_block v4, w3, x5, x8, w7 /* first tweak */ enc_switch_key w3, x2, x8 b .LxtsencNx .Lxtsencnotfirst: enc_prepare w3, x2, x8 .LxtsencloopNx: next_tweak v4, v4, v8 .LxtsencNx: subs w4, w4, #64 bmi .Lxtsenc1x ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ next_tweak v5, v4, v8 eor v0.16b, v0.16b, v4.16b next_tweak v6, v5, v8 eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b next_tweak v7, v6, v8 eor v3.16b, v3.16b, v7.16b bl aes_encrypt_block4x eor v3.16b, v3.16b, v7.16b eor v0.16b, v0.16b, v4.16b eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b st1 {v0.16b-v3.16b}, [x0], #64 mov v4.16b, v7.16b cbz w4, .Lxtsencret xts_reload_mask v8 b .LxtsencloopNx .Lxtsenc1x: adds w4, w4, #64 beq .Lxtsencout subs w4, w4, #16 bmi .LxtsencctsNx .Lxtsencloop: ld1 {v0.16b}, [x1], #16 .Lxtsencctsout: eor v0.16b, v0.16b, v4.16b encrypt_block v0, w3, x2, x8, w7 eor v0.16b, v0.16b, v4.16b cbz w4, .Lxtsencout subs w4, w4, #16 next_tweak v4, v4, v8 bmi .Lxtsenccts st1 {v0.16b}, [x0], #16 b .Lxtsencloop .Lxtsencout: st1 {v0.16b}, [x0] .Lxtsencret: st1 {v4.16b}, [x6] ldp x29, x30, [sp], #16 ret .LxtsencctsNx: mov v0.16b, v3.16b sub x0, x0, #16 .Lxtsenccts: adr_l x8, .Lcts_permute_table add x1, x1, w4, sxtw /* rewind input pointer */ add w4, w4, #16 /* # bytes in final block */ add x9, x8, #32 add x8, x8, x4 sub x9, x9, x4 add x4, x0, x4 /* output address of final block */ ld1 {v1.16b}, [x1] /* load final block */ ld1 {v2.16b}, [x8] ld1 {v3.16b}, [x9] tbl v2.16b, {v0.16b}, v2.16b tbx v0.16b, {v1.16b}, v3.16b st1 {v2.16b}, [x4] /* overlapping stores */ mov w4, wzr b .Lxtsencctsout AES_FUNC_END(aes_xts_encrypt) AES_FUNC_START(aes_xts_decrypt) stp x29, x30, [sp, #-16]! 
mov x29, sp /* subtract 16 bytes if we are doing CTS */ sub w8, w4, #0x10 tst w4, #0xf csel w4, w4, w8, eq ld1 {v4.16b}, [x6] xts_load_mask v8 xts_cts_skip_tw w7, .Lxtsdecskiptw cbz w7, .Lxtsdecnotfirst enc_prepare w3, x5, x8 encrypt_block v4, w3, x5, x8, w7 /* first tweak */ .Lxtsdecskiptw: dec_prepare w3, x2, x8 b .LxtsdecNx .Lxtsdecnotfirst: dec_prepare w3, x2, x8 .LxtsdecloopNx: next_tweak v4, v4, v8 .LxtsdecNx: subs w4, w4, #64 bmi .Lxtsdec1x ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ next_tweak v5, v4, v8 eor v0.16b, v0.16b, v4.16b next_tweak v6, v5, v8 eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b next_tweak v7, v6, v8 eor v3.16b, v3.16b, v7.16b bl aes_decrypt_block4x eor v3.16b, v3.16b, v7.16b eor v0.16b, v0.16b, v4.16b eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b st1 {v0.16b-v3.16b}, [x0], #64 mov v4.16b, v7.16b cbz w4, .Lxtsdecout xts_reload_mask v8 b .LxtsdecloopNx .Lxtsdec1x: adds w4, w4, #64 beq .Lxtsdecout subs w4, w4, #16 .Lxtsdecloop: ld1 {v0.16b}, [x1], #16 bmi .Lxtsdeccts .Lxtsdecctsout: eor v0.16b, v0.16b, v4.16b decrypt_block v0, w3, x2, x8, w7 eor v0.16b, v0.16b, v4.16b st1 {v0.16b}, [x0], #16 cbz w4, .Lxtsdecout subs w4, w4, #16 next_tweak v4, v4, v8 b .Lxtsdecloop .Lxtsdecout: st1 {v4.16b}, [x6] ldp x29, x30, [sp], #16 ret .Lxtsdeccts: adr_l x8, .Lcts_permute_table add x1, x1, w4, sxtw /* rewind input pointer */ add w4, w4, #16 /* # bytes in final block */ add x9, x8, #32 add x8, x8, x4 sub x9, x9, x4 add x4, x0, x4 /* output address of final block */ next_tweak v5, v4, v8 ld1 {v1.16b}, [x1] /* load final block */ ld1 {v2.16b}, [x8] ld1 {v3.16b}, [x9] eor v0.16b, v0.16b, v5.16b decrypt_block v0, w3, x2, x8, w7 eor v0.16b, v0.16b, v5.16b tbl v2.16b, {v0.16b}, v2.16b tbx v0.16b, {v1.16b}, v3.16b st1 {v2.16b}, [x4] /* overlapping stores */ mov w4, wzr b .Lxtsdecctsout AES_FUNC_END(aes_xts_decrypt) /* * aes_mac_update(u8 const in[], u32 const rk[], int rounds, * int blocks, u8 dg[], int enc_before, int enc_after) */ AES_FUNC_START(aes_mac_update) ld1 {v0.16b}, [x4] /* get dg */ enc_prepare w2, x1, x7 cbz w5, .Lmacloop4x encrypt_block v0, w2, x1, x7, w8 .Lmacloop4x: subs w3, w3, #4 bmi .Lmac1x ld1 {v1.16b-v4.16b}, [x0], #64 /* get next pt block */ eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */ encrypt_block v0, w2, x1, x7, w8 eor v0.16b, v0.16b, v2.16b encrypt_block v0, w2, x1, x7, w8 eor v0.16b, v0.16b, v3.16b encrypt_block v0, w2, x1, x7, w8 eor v0.16b, v0.16b, v4.16b cmp w3, wzr csinv x5, x6, xzr, eq cbz w5, .Lmacout encrypt_block v0, w2, x1, x7, w8 st1 {v0.16b}, [x4] /* return dg */ cond_yield .Lmacout, x7, x8 b .Lmacloop4x .Lmac1x: add w3, w3, #4 .Lmacloop: cbz w3, .Lmacout ld1 {v1.16b}, [x0], #16 /* get next pt block */ eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */ subs w3, w3, #1 csinv x5, x6, xzr, eq cbz w5, .Lmacout .Lmacenc: encrypt_block v0, w2, x1, x7, w8 b .Lmacloop .Lmacout: st1 {v0.16b}, [x4] /* return dg */ mov w0, w3 ret AES_FUNC_END(aes_mac_update)
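/*
 * For reference, the CBC chaining implemented by aes_cbc_encrypt above is
 * equivalent to the following one-block-at-a-time C sketch. The helpers
 * aes_encrypt_rounds() and xor_block() are illustrative assumptions, not
 * part of this file's interface:
 *
 *	static void cbc_encrypt_ref(u8 *out, const u8 *in, const u8 *rk,
 *				    int rounds, int blocks, u8 iv[16])
 *	{
 *		u8 buf[16];
 *
 *		while (blocks--) {
 *			xor_block(buf, in, iv);			// P_i ^ C_{i-1}
 *			aes_encrypt_rounds(buf, rk, rounds);	// C_i = E_k(...)
 *			memcpy(out, buf, 16);
 *			memcpy(iv, buf, 16);			// next IV
 *			in += 16;
 *			out += 16;
 *		}
 *	}
 *
 * Note that the 4x loop in aes_cbc_encrypt only amortises loads and stores;
 * the blocks are still encrypted serially, since each one depends on the
 * previous ciphertext block.
 */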
aixcc-public/challenge-001-exemplar-source
10,267
arch/arm64/crypto/polyval-ce-core.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Implementation of POLYVAL using ARMv8 Crypto Extensions. * * Copyright 2021 Google LLC */ /* * This is an efficient implementation of POLYVAL using ARMv8 Crypto Extensions * It works on 8 blocks at a time, by precomputing the first 8 keys powers h^8, * ..., h^1 in the POLYVAL finite field. This precomputation allows us to split * finite field multiplication into two steps. * * In the first step, we consider h^i, m_i as normal polynomials of degree less * than 128. We then compute p(x) = h^8m_0 + ... + h^1m_7 where multiplication * is simply polynomial multiplication. * * In the second step, we compute the reduction of p(x) modulo the finite field * modulus g(x) = x^128 + x^127 + x^126 + x^121 + 1. * * This two step process is equivalent to computing h^8m_0 + ... + h^1m_7 where * multiplication is finite field multiplication. The advantage is that the * two-step process only requires 1 finite field reduction for every 8 * polynomial multiplications. Further parallelism is gained by interleaving the * multiplications and polynomial reductions. */ #include <linux/linkage.h> #define STRIDE_BLOCKS 8 KEY_POWERS .req x0 MSG .req x1 BLOCKS_LEFT .req x2 ACCUMULATOR .req x3 KEY_START .req x10 EXTRA_BYTES .req x11 TMP .req x13 M0 .req v0 M1 .req v1 M2 .req v2 M3 .req v3 M4 .req v4 M5 .req v5 M6 .req v6 M7 .req v7 KEY8 .req v8 KEY7 .req v9 KEY6 .req v10 KEY5 .req v11 KEY4 .req v12 KEY3 .req v13 KEY2 .req v14 KEY1 .req v15 PL .req v16 PH .req v17 TMP_V .req v18 LO .req v20 MI .req v21 HI .req v22 SUM .req v23 GSTAR .req v24 .text .arch armv8-a+crypto .align 4 .Lgstar: .quad 0xc200000000000000, 0xc200000000000000 /* * Computes the product of two 128-bit polynomials in X and Y and XORs the * components of the 256-bit product into LO, MI, HI. * * Given: * X = [X_1 : X_0] * Y = [Y_1 : Y_0] * * We compute: * LO += X_0 * Y_0 * MI += (X_0 + X_1) * (Y_0 + Y_1) * HI += X_1 * Y_1 * * Later, the 256-bit result can be extracted as: * [HI_1 : HI_0 + HI_1 + MI_1 + LO_1 : LO_1 + HI_0 + MI_0 + LO_0 : LO_0] * This step is done when computing the polynomial reduction for efficiency * reasons. * * Karatsuba multiplication is used instead of Schoolbook multiplication because * it was found to be slightly faster on ARM64 CPUs. * */ .macro karatsuba1 X Y X .req \X Y .req \Y ext v25.16b, X.16b, X.16b, #8 ext v26.16b, Y.16b, Y.16b, #8 eor v25.16b, v25.16b, X.16b eor v26.16b, v26.16b, Y.16b pmull2 v28.1q, X.2d, Y.2d pmull v29.1q, X.1d, Y.1d pmull v27.1q, v25.1d, v26.1d eor HI.16b, HI.16b, v28.16b eor LO.16b, LO.16b, v29.16b eor MI.16b, MI.16b, v27.16b .unreq X .unreq Y .endm /* * Same as karatsuba1, except overwrites HI, LO, MI rather than XORing into * them. */ .macro karatsuba1_store X Y X .req \X Y .req \Y ext v25.16b, X.16b, X.16b, #8 ext v26.16b, Y.16b, Y.16b, #8 eor v25.16b, v25.16b, X.16b eor v26.16b, v26.16b, Y.16b pmull2 HI.1q, X.2d, Y.2d pmull LO.1q, X.1d, Y.1d pmull MI.1q, v25.1d, v26.1d .unreq X .unreq Y .endm /* * Computes the 256-bit polynomial represented by LO, HI, MI. Stores * the result in PL, PH. 
* [PH : PL] = * [HI_1 : HI_1 + HI_0 + MI_1 + LO_1 : HI_0 + MI_0 + LO_1 + LO_0 : LO_0] */ .macro karatsuba2 // v4 = [HI_1 + MI_1 : HI_0 + MI_0] eor v4.16b, HI.16b, MI.16b // v4 = [HI_1 + MI_1 + LO_1 : HI_0 + MI_0 + LO_0] eor v4.16b, v4.16b, LO.16b // v5 = [HI_0 : LO_1] ext v5.16b, LO.16b, HI.16b, #8 // v4 = [HI_1 + HI_0 + MI_1 + LO_1 : HI_0 + MI_0 + LO_1 + LO_0] eor v4.16b, v4.16b, v5.16b // HI = [HI_0 : HI_1] ext HI.16b, HI.16b, HI.16b, #8 // LO = [LO_0 : LO_1] ext LO.16b, LO.16b, LO.16b, #8 // PH = [HI_1 : HI_1 + HI_0 + MI_1 + LO_1] ext PH.16b, v4.16b, HI.16b, #8 // PL = [HI_0 + MI_0 + LO_1 + LO_0 : LO_0] ext PL.16b, LO.16b, v4.16b, #8 .endm /* * Computes the 128-bit reduction of PH : PL. Stores the result in dest. * * This macro computes p(x) mod g(x) where p(x) is in montgomery form and g(x) = * x^128 + x^127 + x^126 + x^121 + 1. * * We have a 256-bit polynomial PH : PL = P_3 : P_2 : P_1 : P_0 that is the * product of two 128-bit polynomials in Montgomery form. We need to reduce it * mod g(x). Also, since polynomials in Montgomery form have an "extra" factor * of x^128, this product has two extra factors of x^128. To get it back into * Montgomery form, we need to remove one of these factors by dividing by x^128. * * To accomplish both of these goals, we add multiples of g(x) that cancel out * the low 128 bits P_1 : P_0, leaving just the high 128 bits. Since the low * bits are zero, the polynomial division by x^128 can be done by right * shifting. * * Since the only nonzero term in the low 64 bits of g(x) is the constant term, * the multiple of g(x) needed to cancel out P_0 is P_0 * g(x). The CPU can * only do 64x64 bit multiplications, so split P_0 * g(x) into x^128 * P_0 + * x^64 * g*(x) * P_0 + P_0, where g*(x) is bits 64-127 of g(x). Adding this to * the original polynomial gives P_3 : P_2 + P_0 + T_1 : P_1 + T_0 : 0, where T * = T_1 : T_0 = g*(x) * P_0. Thus, bits 0-63 got "folded" into bits 64-191. * * Repeating this same process on the next 64 bits "folds" bits 64-127 into bits * 128-255, giving the answer in bits 128-255. This time, we need to cancel P_1 * + T_0 in bits 64-127. The multiple of g(x) required is (P_1 + T_0) * g(x) * * x^64. Adding this to our previous computation gives P_3 + P_1 + T_0 + V_1 : * P_2 + P_0 + T_1 + V_0 : 0 : 0, where V = V_1 : V_0 = g*(x) * (P_1 + T_0). * * So our final computation is: * T = T_1 : T_0 = g*(x) * P_0 * V = V_1 : V_0 = g*(x) * (P_1 + T_0) * p(x) / x^{128} mod g(x) = P_3 + P_1 + T_0 + V_1 : P_2 + P_0 + T_1 + V_0 * * The implementation below saves a XOR instruction by computing P_1 + T_0 : P_0 * + T_1 and XORing into dest, rather than separately XORing P_1 : P_0 and T_0 : * T_1 into dest. This allows us to reuse P_1 + T_0 when computing V. */ .macro montgomery_reduction dest DEST .req \dest // TMP_V = T_1 : T_0 = P_0 * g*(x) pmull TMP_V.1q, PL.1d, GSTAR.1d // TMP_V = T_0 : T_1 ext TMP_V.16b, TMP_V.16b, TMP_V.16b, #8 // TMP_V = P_1 + T_0 : P_0 + T_1 eor TMP_V.16b, PL.16b, TMP_V.16b // PH = P_3 + P_1 + T_0 : P_2 + P_0 + T_1 eor PH.16b, PH.16b, TMP_V.16b // TMP_V = V_1 : V_0 = (P_1 + T_0) * g*(x) pmull2 TMP_V.1q, TMP_V.2d, GSTAR.2d eor DEST.16b, PH.16b, TMP_V.16b .unreq DEST .endm /* * Compute Polyval on 8 blocks. * * If reduce is set, also computes the montgomery reduction of the * previous full_stride call and XORs with the first message block. * (m_0 + REDUCE(PL, PH))h^8 + ... + m_7h^1. * I.e., the first multiplication uses m_0 + REDUCE(PL, PH) instead of m_0. * * Sets PL, PH. 
*/ .macro full_stride reduce eor LO.16b, LO.16b, LO.16b eor MI.16b, MI.16b, MI.16b eor HI.16b, HI.16b, HI.16b ld1 {M0.16b, M1.16b, M2.16b, M3.16b}, [MSG], #64 ld1 {M4.16b, M5.16b, M6.16b, M7.16b}, [MSG], #64 karatsuba1 M7 KEY1 .if \reduce pmull TMP_V.1q, PL.1d, GSTAR.1d .endif karatsuba1 M6 KEY2 .if \reduce ext TMP_V.16b, TMP_V.16b, TMP_V.16b, #8 .endif karatsuba1 M5 KEY3 .if \reduce eor TMP_V.16b, PL.16b, TMP_V.16b .endif karatsuba1 M4 KEY4 .if \reduce eor PH.16b, PH.16b, TMP_V.16b .endif karatsuba1 M3 KEY5 .if \reduce pmull2 TMP_V.1q, TMP_V.2d, GSTAR.2d .endif karatsuba1 M2 KEY6 .if \reduce eor SUM.16b, PH.16b, TMP_V.16b .endif karatsuba1 M1 KEY7 eor M0.16b, M0.16b, SUM.16b karatsuba1 M0 KEY8 karatsuba2 .endm /* * Handle any extra blocks after full_stride loop. */ .macro partial_stride add KEY_POWERS, KEY_START, #(STRIDE_BLOCKS << 4) sub KEY_POWERS, KEY_POWERS, BLOCKS_LEFT, lsl #4 ld1 {KEY1.16b}, [KEY_POWERS], #16 ld1 {TMP_V.16b}, [MSG], #16 eor SUM.16b, SUM.16b, TMP_V.16b karatsuba1_store KEY1 SUM sub BLOCKS_LEFT, BLOCKS_LEFT, #1 tst BLOCKS_LEFT, #4 beq .Lpartial4BlocksDone ld1 {M0.16b, M1.16b, M2.16b, M3.16b}, [MSG], #64 ld1 {KEY8.16b, KEY7.16b, KEY6.16b, KEY5.16b}, [KEY_POWERS], #64 karatsuba1 M0 KEY8 karatsuba1 M1 KEY7 karatsuba1 M2 KEY6 karatsuba1 M3 KEY5 .Lpartial4BlocksDone: tst BLOCKS_LEFT, #2 beq .Lpartial2BlocksDone ld1 {M0.16b, M1.16b}, [MSG], #32 ld1 {KEY8.16b, KEY7.16b}, [KEY_POWERS], #32 karatsuba1 M0 KEY8 karatsuba1 M1 KEY7 .Lpartial2BlocksDone: tst BLOCKS_LEFT, #1 beq .LpartialDone ld1 {M0.16b}, [MSG], #16 ld1 {KEY8.16b}, [KEY_POWERS], #16 karatsuba1 M0 KEY8 .LpartialDone: karatsuba2 montgomery_reduction SUM .endm /* * Perform montgomery multiplication in GF(2^128) and store result in op1. * * Computes op1*op2*x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1 * If op1, op2 are in montgomery form, this computes the montgomery * form of op1*op2. * * void pmull_polyval_mul(u8 *op1, const u8 *op2); */ SYM_FUNC_START(pmull_polyval_mul) adr TMP, .Lgstar ld1 {GSTAR.2d}, [TMP] ld1 {v0.16b}, [x0] ld1 {v1.16b}, [x1] karatsuba1_store v0 v1 karatsuba2 montgomery_reduction SUM st1 {SUM.16b}, [x0] ret SYM_FUNC_END(pmull_polyval_mul) /* * Perform polynomial evaluation as specified by POLYVAL. This computes: * h^n * accumulator + h^n * m_0 + ... + h^1 * m_{n-1} * where n=nblocks, h is the hash key, and m_i are the message blocks. * * x0 - pointer to precomputed key powers h^8 ... h^1 * x1 - pointer to message blocks * x2 - number of blocks to hash * x3 - pointer to accumulator * * void pmull_polyval_update(const struct polyval_ctx *ctx, const u8 *in, * size_t nblocks, u8 *accumulator); */ SYM_FUNC_START(pmull_polyval_update) adr TMP, .Lgstar mov KEY_START, KEY_POWERS ld1 {GSTAR.2d}, [TMP] ld1 {SUM.16b}, [ACCUMULATOR] subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS blt .LstrideLoopExit ld1 {KEY8.16b, KEY7.16b, KEY6.16b, KEY5.16b}, [KEY_POWERS], #64 ld1 {KEY4.16b, KEY3.16b, KEY2.16b, KEY1.16b}, [KEY_POWERS], #64 full_stride 0 subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS blt .LstrideLoopExitReduce .LstrideLoop: full_stride 1 subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS bge .LstrideLoop .LstrideLoopExitReduce: montgomery_reduction SUM .LstrideLoopExit: adds BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS beq .LskipPartial partial_stride .LskipPartial: st1 {SUM.16b}, [ACCUMULATOR] ret SYM_FUNC_END(pmull_polyval_update)
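/*
 * The stride/partial-stride logic above evaluates the same polynomial that a
 * scalar Horner loop would, just eight blocks at a time with one Montgomery
 * reduction per stride. A C sketch of the semantics, where
 * gf128_mul_polyval() is an assumed helper performing a single POLYVAL
 * (Montgomery-form) field multiplication:
 *
 *	static void polyval_update_ref(u8 acc[16], const u8 h[16],
 *				       const u8 *msg, size_t nblocks)
 *	{
 *		while (nblocks--) {
 *			for (int i = 0; i < 16; i++)	// acc += m_i
 *				acc[i] ^= *msg++;
 *			gf128_mul_polyval(acc, h);	// acc *= h
 *		}
 *	}
 *
 * Unrolling this loop gives h^n*acc + h^n*m_0 + ... + h^1*m_{n-1}, matching
 * the comment on pmull_polyval_update above.
 */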
aixcc-public/challenge-001-exemplar-source
3,809
arch/arm64/crypto/aes-ce.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm64/crypto/aes-ce.S - AES cipher for ARMv8 with
 * Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

#define AES_FUNC_START(func)	SYM_FUNC_START(ce_ ## func)
#define AES_FUNC_END(func)	SYM_FUNC_END(ce_ ## func)

	.arch		armv8-a+crypto

	xtsmask		.req	v16
	cbciv		.req	v16
	vctr		.req	v16

	.macro		xts_reload_mask, tmp
	.endm

	.macro		xts_cts_skip_tw, reg, lbl
	.endm

	/* preload all round keys */
	.macro		load_round_keys, rounds, rk
	cmp		\rounds, #12
	blo		2222f		/* 128 bits */
	beq		1111f		/* 192 bits */
	ld1		{v17.4s-v18.4s}, [\rk], #32
1111:	ld1		{v19.4s-v20.4s}, [\rk], #32
2222:	ld1		{v21.4s-v24.4s}, [\rk], #64
	ld1		{v25.4s-v28.4s}, [\rk], #64
	ld1		{v29.4s-v31.4s}, [\rk]
	.endm

	/* prepare for encryption with key in rk[] */
	.macro		enc_prepare, rounds, rk, temp
	mov		\temp, \rk
	load_round_keys	\rounds, \temp
	.endm

	/* prepare for encryption (again) but with new key in rk[] */
	.macro		enc_switch_key, rounds, rk, temp
	mov		\temp, \rk
	load_round_keys	\rounds, \temp
	.endm

	/* prepare for decryption with key in rk[] */
	.macro		dec_prepare, rounds, rk, temp
	mov		\temp, \rk
	load_round_keys	\rounds, \temp
	.endm

	.macro		do_enc_Nx, de, mc, k, i0, i1, i2, i3, i4
	aes\de		\i0\().16b, \k\().16b
	aes\mc		\i0\().16b, \i0\().16b
	.ifnb		\i1
	aes\de		\i1\().16b, \k\().16b
	aes\mc		\i1\().16b, \i1\().16b
	.ifnb		\i3
	aes\de		\i2\().16b, \k\().16b
	aes\mc		\i2\().16b, \i2\().16b
	aes\de		\i3\().16b, \k\().16b
	aes\mc		\i3\().16b, \i3\().16b
	.ifnb		\i4
	aes\de		\i4\().16b, \k\().16b
	aes\mc		\i4\().16b, \i4\().16b
	.endif
	.endif
	.endif
	.endm

	/* up to 5 interleaved encryption rounds with the same round key */
	.macro		round_Nx, enc, k, i0, i1, i2, i3, i4
	.ifc		\enc, e
	do_enc_Nx	e, mc, \k, \i0, \i1, \i2, \i3, \i4
	.else
	do_enc_Nx	d, imc, \k, \i0, \i1, \i2, \i3, \i4
	.endif
	.endm

	/* up to 5 interleaved final rounds */
	.macro		fin_round_Nx, de, k, k2, i0, i1, i2, i3, i4
	aes\de		\i0\().16b, \k\().16b
	.ifnb		\i1
	aes\de		\i1\().16b, \k\().16b
	.ifnb		\i3
	aes\de		\i2\().16b, \k\().16b
	aes\de		\i3\().16b, \k\().16b
	.ifnb		\i4
	aes\de		\i4\().16b, \k\().16b
	.endif
	.endif
	.endif
	eor		\i0\().16b, \i0\().16b, \k2\().16b
	.ifnb		\i1
	eor		\i1\().16b, \i1\().16b, \k2\().16b
	.ifnb		\i3
	eor		\i2\().16b, \i2\().16b, \k2\().16b
	eor		\i3\().16b, \i3\().16b, \k2\().16b
	.ifnb		\i4
	eor		\i4\().16b, \i4\().16b, \k2\().16b
	.endif
	.endif
	.endif
	.endm

	/* up to 5 interleaved blocks */
	.macro		do_block_Nx, enc, rounds, i0, i1, i2, i3, i4
	cmp		\rounds, #12
	blo		2222f		/* 128 bits */
	beq		1111f		/* 192 bits */
	round_Nx	\enc, v17, \i0, \i1, \i2, \i3, \i4
	round_Nx	\enc, v18, \i0, \i1, \i2, \i3, \i4
1111:	round_Nx	\enc, v19, \i0, \i1, \i2, \i3, \i4
	round_Nx	\enc, v20, \i0, \i1, \i2, \i3, \i4
2222:	.irp		key, v21, v22, v23, v24, v25, v26, v27, v28, v29
	round_Nx	\enc, \key, \i0, \i1, \i2, \i3, \i4
	.endr
	fin_round_Nx	\enc, v30, v31, \i0, \i1, \i2, \i3, \i4
	.endm

	.macro		encrypt_block, in, rounds, t0, t1, t2
	do_block_Nx	e, \rounds, \in
	.endm

	.macro		encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
	do_block_Nx	e, \rounds, \i0, \i1, \i2, \i3
	.endm

	.macro		encrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2
	do_block_Nx	e, \rounds, \i0, \i1, \i2, \i3, \i4
	.endm

	.macro		decrypt_block, in, rounds, t0, t1, t2
	do_block_Nx	d, \rounds, \in
	.endm

	.macro		decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
	do_block_Nx	d, \rounds, \i0, \i1, \i2, \i3
	.endm

	.macro		decrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2
	do_block_Nx	d, \rounds, \i0, \i1, \i2, \i3, \i4
	.endm

#define MAX_STRIDE	5

#include "aes-modes.S"
aixcc-public/challenge-001-exemplar-source
17,570
arch/arm64/crypto/ghash-ce-core.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Accelerated GHASH implementation with ARMv8 PMULL instructions. * * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org> */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/assembler.h> SHASH .req v0 SHASH2 .req v1 T1 .req v2 T2 .req v3 MASK .req v4 XM .req v5 XL .req v6 XH .req v7 IN1 .req v7 k00_16 .req v8 k32_48 .req v9 t3 .req v10 t4 .req v11 t5 .req v12 t6 .req v13 t7 .req v14 t8 .req v15 t9 .req v16 perm1 .req v17 perm2 .req v18 perm3 .req v19 sh1 .req v20 sh2 .req v21 sh3 .req v22 sh4 .req v23 ss1 .req v24 ss2 .req v25 ss3 .req v26 ss4 .req v27 XL2 .req v8 XM2 .req v9 XH2 .req v10 XL3 .req v11 XM3 .req v12 XH3 .req v13 TT3 .req v14 TT4 .req v15 HH .req v16 HH3 .req v17 HH4 .req v18 HH34 .req v19 .text .arch armv8-a+crypto .macro __pmull_p64, rd, rn, rm pmull \rd\().1q, \rn\().1d, \rm\().1d .endm .macro __pmull2_p64, rd, rn, rm pmull2 \rd\().1q, \rn\().2d, \rm\().2d .endm .macro __pmull_p8, rq, ad, bd ext t3.8b, \ad\().8b, \ad\().8b, #1 // A1 ext t5.8b, \ad\().8b, \ad\().8b, #2 // A2 ext t7.8b, \ad\().8b, \ad\().8b, #3 // A3 __pmull_p8_\bd \rq, \ad .endm .macro __pmull2_p8, rq, ad, bd tbl t3.16b, {\ad\().16b}, perm1.16b // A1 tbl t5.16b, {\ad\().16b}, perm2.16b // A2 tbl t7.16b, {\ad\().16b}, perm3.16b // A3 __pmull2_p8_\bd \rq, \ad .endm .macro __pmull_p8_SHASH, rq, ad __pmull_p8_tail \rq, \ad\().8b, SHASH.8b, 8b,, sh1, sh2, sh3, sh4 .endm .macro __pmull_p8_SHASH2, rq, ad __pmull_p8_tail \rq, \ad\().8b, SHASH2.8b, 8b,, ss1, ss2, ss3, ss4 .endm .macro __pmull2_p8_SHASH, rq, ad __pmull_p8_tail \rq, \ad\().16b, SHASH.16b, 16b, 2, sh1, sh2, sh3, sh4 .endm .macro __pmull_p8_tail, rq, ad, bd, nb, t, b1, b2, b3, b4 pmull\t t3.8h, t3.\nb, \bd // F = A1*B pmull\t t4.8h, \ad, \b1\().\nb // E = A*B1 pmull\t t5.8h, t5.\nb, \bd // H = A2*B pmull\t t6.8h, \ad, \b2\().\nb // G = A*B2 pmull\t t7.8h, t7.\nb, \bd // J = A3*B pmull\t t8.8h, \ad, \b3\().\nb // I = A*B3 pmull\t t9.8h, \ad, \b4\().\nb // K = A*B4 pmull\t \rq\().8h, \ad, \bd // D = A*B eor t3.16b, t3.16b, t4.16b // L = E + F eor t5.16b, t5.16b, t6.16b // M = G + H eor t7.16b, t7.16b, t8.16b // N = I + J uzp1 t4.2d, t3.2d, t5.2d uzp2 t3.2d, t3.2d, t5.2d uzp1 t6.2d, t7.2d, t9.2d uzp2 t7.2d, t7.2d, t9.2d // t3 = (L) (P0 + P1) << 8 // t5 = (M) (P2 + P3) << 16 eor t4.16b, t4.16b, t3.16b and t3.16b, t3.16b, k32_48.16b // t7 = (N) (P4 + P5) << 24 // t9 = (K) (P6 + P7) << 32 eor t6.16b, t6.16b, t7.16b and t7.16b, t7.16b, k00_16.16b eor t4.16b, t4.16b, t3.16b eor t6.16b, t6.16b, t7.16b zip2 t5.2d, t4.2d, t3.2d zip1 t3.2d, t4.2d, t3.2d zip2 t9.2d, t6.2d, t7.2d zip1 t7.2d, t6.2d, t7.2d ext t3.16b, t3.16b, t3.16b, #15 ext t5.16b, t5.16b, t5.16b, #14 ext t7.16b, t7.16b, t7.16b, #13 ext t9.16b, t9.16b, t9.16b, #12 eor t3.16b, t3.16b, t5.16b eor t7.16b, t7.16b, t9.16b eor \rq\().16b, \rq\().16b, t3.16b eor \rq\().16b, \rq\().16b, t7.16b .endm .macro __pmull_pre_p64 add x8, x3, #16 ld1 {HH.2d-HH4.2d}, [x8] trn1 SHASH2.2d, SHASH.2d, HH.2d trn2 T1.2d, SHASH.2d, HH.2d eor SHASH2.16b, SHASH2.16b, T1.16b trn1 HH34.2d, HH3.2d, HH4.2d trn2 T1.2d, HH3.2d, HH4.2d eor HH34.16b, HH34.16b, T1.16b movi MASK.16b, #0xe1 shl MASK.2d, MASK.2d, #57 .endm .macro __pmull_pre_p8 ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 eor SHASH2.16b, SHASH2.16b, SHASH.16b // k00_16 := 0x0000000000000000_000000000000ffff // k32_48 := 0x00000000ffffffff_0000ffffffffffff movi k32_48.2d, #0xffffffff mov k32_48.h[2], k32_48.h[0] ushr k00_16.2d, k32_48.2d, #32 // prepare the permutation vectors mov_q x5, 
0x080f0e0d0c0b0a09 movi T1.8b, #8 dup perm1.2d, x5 eor perm1.16b, perm1.16b, T1.16b ushr perm2.2d, perm1.2d, #8 ushr perm3.2d, perm1.2d, #16 ushr T1.2d, perm1.2d, #24 sli perm2.2d, perm1.2d, #56 sli perm3.2d, perm1.2d, #48 sli T1.2d, perm1.2d, #40 // precompute loop invariants tbl sh1.16b, {SHASH.16b}, perm1.16b tbl sh2.16b, {SHASH.16b}, perm2.16b tbl sh3.16b, {SHASH.16b}, perm3.16b tbl sh4.16b, {SHASH.16b}, T1.16b ext ss1.8b, SHASH2.8b, SHASH2.8b, #1 ext ss2.8b, SHASH2.8b, SHASH2.8b, #2 ext ss3.8b, SHASH2.8b, SHASH2.8b, #3 ext ss4.8b, SHASH2.8b, SHASH2.8b, #4 .endm // // PMULL (64x64->128) based reduction for CPUs that can do // it in a single instruction. // .macro __pmull_reduce_p64 pmull T2.1q, XL.1d, MASK.1d eor XM.16b, XM.16b, T1.16b mov XH.d[0], XM.d[1] mov XM.d[1], XL.d[0] eor XL.16b, XM.16b, T2.16b ext T2.16b, XL.16b, XL.16b, #8 pmull XL.1q, XL.1d, MASK.1d .endm // // Alternative reduction for CPUs that lack support for the // 64x64->128 PMULL instruction // .macro __pmull_reduce_p8 eor XM.16b, XM.16b, T1.16b mov XL.d[1], XM.d[0] mov XH.d[0], XM.d[1] shl T1.2d, XL.2d, #57 shl T2.2d, XL.2d, #62 eor T2.16b, T2.16b, T1.16b shl T1.2d, XL.2d, #63 eor T2.16b, T2.16b, T1.16b ext T1.16b, XL.16b, XH.16b, #8 eor T2.16b, T2.16b, T1.16b mov XL.d[1], T2.d[0] mov XH.d[0], T2.d[1] ushr T2.2d, XL.2d, #1 eor XH.16b, XH.16b, XL.16b eor XL.16b, XL.16b, T2.16b ushr T2.2d, T2.2d, #6 ushr XL.2d, XL.2d, #1 .endm .macro __pmull_ghash, pn ld1 {SHASH.2d}, [x3] ld1 {XL.2d}, [x1] __pmull_pre_\pn /* do the head block first, if supplied */ cbz x4, 0f ld1 {T1.2d}, [x4] mov x4, xzr b 3f 0: .ifc \pn, p64 tbnz w0, #0, 2f // skip until #blocks is a tbnz w0, #1, 2f // round multiple of 4 1: ld1 {XM3.16b-TT4.16b}, [x2], #64 sub w0, w0, #4 rev64 T1.16b, XM3.16b rev64 T2.16b, XH3.16b rev64 TT4.16b, TT4.16b rev64 TT3.16b, TT3.16b ext IN1.16b, TT4.16b, TT4.16b, #8 ext XL3.16b, TT3.16b, TT3.16b, #8 eor TT4.16b, TT4.16b, IN1.16b pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1 pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0 pmull XM2.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0) eor TT3.16b, TT3.16b, XL3.16b pmull2 XH3.1q, HH.2d, XL3.2d // a1 * b1 pmull XL3.1q, HH.1d, XL3.1d // a0 * b0 pmull2 XM3.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0) ext IN1.16b, T2.16b, T2.16b, #8 eor XL2.16b, XL2.16b, XL3.16b eor XH2.16b, XH2.16b, XH3.16b eor XM2.16b, XM2.16b, XM3.16b eor T2.16b, T2.16b, IN1.16b pmull2 XH3.1q, HH3.2d, IN1.2d // a1 * b1 pmull XL3.1q, HH3.1d, IN1.1d // a0 * b0 pmull XM3.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0) eor XL2.16b, XL2.16b, XL3.16b eor XH2.16b, XH2.16b, XH3.16b eor XM2.16b, XM2.16b, XM3.16b ext IN1.16b, T1.16b, T1.16b, #8 ext TT3.16b, XL.16b, XL.16b, #8 eor XL.16b, XL.16b, IN1.16b eor T1.16b, T1.16b, TT3.16b pmull2 XH.1q, HH4.2d, XL.2d // a1 * b1 eor T1.16b, T1.16b, XL.16b pmull XL.1q, HH4.1d, XL.1d // a0 * b0 pmull2 XM.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0) eor XL.16b, XL.16b, XL2.16b eor XH.16b, XH.16b, XH2.16b eor XM.16b, XM.16b, XM2.16b eor T2.16b, XL.16b, XH.16b ext T1.16b, XL.16b, XH.16b, #8 eor XM.16b, XM.16b, T2.16b __pmull_reduce_p64 eor T2.16b, T2.16b, XH.16b eor XL.16b, XL.16b, T2.16b cbz w0, 5f b 1b .endif 2: ld1 {T1.2d}, [x2], #16 sub w0, w0, #1 3: /* multiply XL by SHASH in GF(2^128) */ CPU_LE( rev64 T1.16b, T1.16b ) ext T2.16b, XL.16b, XL.16b, #8 ext IN1.16b, T1.16b, T1.16b, #8 eor T1.16b, T1.16b, T2.16b eor XL.16b, XL.16b, IN1.16b __pmull2_\pn XH, XL, SHASH // a1 * b1 eor T1.16b, T1.16b, XL.16b __pmull_\pn XL, XL, SHASH // a0 * b0 __pmull_\pn XM, T1, SHASH2 // (a1 + a0)(b1 + b0) 4: eor T2.16b, XL.16b, 
XH.16b ext T1.16b, XL.16b, XH.16b, #8 eor XM.16b, XM.16b, T2.16b __pmull_reduce_\pn eor T2.16b, T2.16b, XH.16b eor XL.16b, XL.16b, T2.16b cbnz w0, 0b 5: st1 {XL.2d}, [x1] ret .endm /* * void pmull_ghash_update(int blocks, u64 dg[], const char *src, * struct ghash_key const *k, const char *head) */ SYM_TYPED_FUNC_START(pmull_ghash_update_p64) __pmull_ghash p64 SYM_FUNC_END(pmull_ghash_update_p64) SYM_TYPED_FUNC_START(pmull_ghash_update_p8) __pmull_ghash p8 SYM_FUNC_END(pmull_ghash_update_p8) KS0 .req v8 KS1 .req v9 KS2 .req v10 KS3 .req v11 INP0 .req v21 INP1 .req v22 INP2 .req v23 INP3 .req v24 K0 .req v25 K1 .req v26 K2 .req v27 K3 .req v28 K4 .req v12 K5 .req v13 K6 .req v4 K7 .req v5 K8 .req v14 K9 .req v15 KK .req v29 KL .req v30 KM .req v31 .macro load_round_keys, rounds, rk, tmp add \tmp, \rk, #64 ld1 {K0.4s-K3.4s}, [\rk] ld1 {K4.4s-K5.4s}, [\tmp] add \tmp, \rk, \rounds, lsl #4 sub \tmp, \tmp, #32 ld1 {KK.4s-KM.4s}, [\tmp] .endm .macro enc_round, state, key aese \state\().16b, \key\().16b aesmc \state\().16b, \state\().16b .endm .macro enc_qround, s0, s1, s2, s3, key enc_round \s0, \key enc_round \s1, \key enc_round \s2, \key enc_round \s3, \key .endm .macro enc_block, state, rounds, rk, tmp add \tmp, \rk, #96 ld1 {K6.4s-K7.4s}, [\tmp], #32 .irp key, K0, K1, K2, K3, K4 K5 enc_round \state, \key .endr tbnz \rounds, #2, .Lnot128_\@ .Lout256_\@: enc_round \state, K6 enc_round \state, K7 .Lout192_\@: enc_round \state, KK aese \state\().16b, KL.16b eor \state\().16b, \state\().16b, KM.16b .subsection 1 .Lnot128_\@: ld1 {K8.4s-K9.4s}, [\tmp], #32 enc_round \state, K6 enc_round \state, K7 ld1 {K6.4s-K7.4s}, [\tmp] enc_round \state, K8 enc_round \state, K9 tbz \rounds, #1, .Lout192_\@ b .Lout256_\@ .previous .endm .align 6 .macro pmull_gcm_do_crypt, enc stp x29, x30, [sp, #-32]! mov x29, sp str x19, [sp, #24] load_round_keys x7, x6, x8 ld1 {SHASH.2d}, [x3], #16 ld1 {HH.2d-HH4.2d}, [x3] trn1 SHASH2.2d, SHASH.2d, HH.2d trn2 T1.2d, SHASH.2d, HH.2d eor SHASH2.16b, SHASH2.16b, T1.16b trn1 HH34.2d, HH3.2d, HH4.2d trn2 T1.2d, HH3.2d, HH4.2d eor HH34.16b, HH34.16b, T1.16b ld1 {XL.2d}, [x4] cbz x0, 3f // tag only? ldr w8, [x5, #12] // load lower counter CPU_LE( rev w8, w8 ) 0: mov w9, #4 // max blocks per round add x10, x0, #0xf lsr x10, x10, #4 // remaining blocks subs x0, x0, #64 csel w9, w10, w9, mi add w8, w8, w9 bmi 1f ld1 {INP0.16b-INP3.16b}, [x2], #64 .subsection 1 /* * Populate the four input registers right to left with up to 63 bytes * of data, using overlapping loads to avoid branches. * * INP0 INP1 INP2 INP3 * 1 byte | | | |x | * 16 bytes | | | |xxxxxxxx| * 17 bytes | | |xxxxxxxx|x | * 47 bytes | |xxxxxxxx|xxxxxxxx|xxxxxxx | * etc etc * * Note that this code may read up to 15 bytes before the start of * the input. 
It is up to the calling code to ensure this is safe if * this happens in the first iteration of the loop (i.e., when the * input size is < 16 bytes) */ 1: mov x15, #16 ands x19, x0, #0xf csel x19, x19, x15, ne adr_l x17, .Lpermute_table + 16 sub x11, x15, x19 add x12, x17, x11 sub x17, x17, x11 ld1 {T1.16b}, [x12] sub x10, x1, x11 sub x11, x2, x11 cmp x0, #-16 csel x14, x15, xzr, gt cmp x0, #-32 csel x15, x15, xzr, gt cmp x0, #-48 csel x16, x19, xzr, gt csel x1, x1, x10, gt csel x2, x2, x11, gt ld1 {INP0.16b}, [x2], x14 ld1 {INP1.16b}, [x2], x15 ld1 {INP2.16b}, [x2], x16 ld1 {INP3.16b}, [x2] tbl INP3.16b, {INP3.16b}, T1.16b b 2f .previous 2: .if \enc == 0 bl pmull_gcm_ghash_4x .endif bl pmull_gcm_enc_4x tbnz x0, #63, 6f st1 {INP0.16b-INP3.16b}, [x1], #64 .if \enc == 1 bl pmull_gcm_ghash_4x .endif bne 0b 3: ldp x19, x10, [sp, #24] cbz x10, 5f // output tag? ld1 {INP3.16b}, [x10] // load lengths[] mov w9, #1 bl pmull_gcm_ghash_4x mov w11, #(0x1 << 24) // BE '1U' ld1 {KS0.16b}, [x5] mov KS0.s[3], w11 enc_block KS0, x7, x6, x12 ext XL.16b, XL.16b, XL.16b, #8 rev64 XL.16b, XL.16b eor XL.16b, XL.16b, KS0.16b .if \enc == 1 st1 {XL.16b}, [x10] // store tag .else ldp x11, x12, [sp, #40] // load tag pointer and authsize adr_l x17, .Lpermute_table ld1 {KS0.16b}, [x11] // load supplied tag add x17, x17, x12 ld1 {KS1.16b}, [x17] // load permute vector cmeq XL.16b, XL.16b, KS0.16b // compare tags mvn XL.16b, XL.16b // -1 for fail, 0 for pass tbl XL.16b, {XL.16b}, KS1.16b // keep authsize bytes only sminv b0, XL.16b // signed minimum across XL smov w0, v0.b[0] // return b0 .endif 4: ldp x29, x30, [sp], #32 ret 5: CPU_LE( rev w8, w8 ) str w8, [x5, #12] // store lower counter st1 {XL.2d}, [x4] b 4b 6: ld1 {T1.16b-T2.16b}, [x17], #32 // permute vectors sub x17, x17, x19, lsl #1 cmp w9, #1 beq 7f .subsection 1 7: ld1 {INP2.16b}, [x1] tbx INP2.16b, {INP3.16b}, T1.16b mov INP3.16b, INP2.16b b 8f .previous st1 {INP0.16b}, [x1], x14 st1 {INP1.16b}, [x1], x15 st1 {INP2.16b}, [x1], x16 tbl INP3.16b, {INP3.16b}, T1.16b tbx INP3.16b, {INP2.16b}, T2.16b 8: st1 {INP3.16b}, [x1] .if \enc == 1 ld1 {T1.16b}, [x17] tbl INP3.16b, {INP3.16b}, T1.16b // clear non-data bits bl pmull_gcm_ghash_4x .endif b 3b .endm /* * void pmull_gcm_encrypt(int blocks, u8 dst[], const u8 src[], * struct ghash_key const *k, u64 dg[], u8 ctr[], * int rounds, u8 tag) */ SYM_FUNC_START(pmull_gcm_encrypt) pmull_gcm_do_crypt 1 SYM_FUNC_END(pmull_gcm_encrypt) /* * void pmull_gcm_decrypt(int blocks, u8 dst[], const u8 src[], * struct ghash_key const *k, u64 dg[], u8 ctr[], * int rounds, u8 tag) */ SYM_FUNC_START(pmull_gcm_decrypt) pmull_gcm_do_crypt 0 SYM_FUNC_END(pmull_gcm_decrypt) SYM_FUNC_START_LOCAL(pmull_gcm_ghash_4x) movi MASK.16b, #0xe1 shl MASK.2d, MASK.2d, #57 rev64 T1.16b, INP0.16b rev64 T2.16b, INP1.16b rev64 TT3.16b, INP2.16b rev64 TT4.16b, INP3.16b ext XL.16b, XL.16b, XL.16b, #8 tbz w9, #2, 0f // <4 blocks? .subsection 1 0: movi XH2.16b, #0 movi XM2.16b, #0 movi XL2.16b, #0 tbz w9, #0, 1f // 2 blocks? tbz w9, #1, 2f // 1 block? 
eor T2.16b, T2.16b, XL.16b ext T1.16b, T2.16b, T2.16b, #8 b .Lgh3 1: eor TT3.16b, TT3.16b, XL.16b ext T2.16b, TT3.16b, TT3.16b, #8 b .Lgh2 2: eor TT4.16b, TT4.16b, XL.16b ext IN1.16b, TT4.16b, TT4.16b, #8 b .Lgh1 .previous eor T1.16b, T1.16b, XL.16b ext IN1.16b, T1.16b, T1.16b, #8 pmull2 XH2.1q, HH4.2d, IN1.2d // a1 * b1 eor T1.16b, T1.16b, IN1.16b pmull XL2.1q, HH4.1d, IN1.1d // a0 * b0 pmull2 XM2.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0) ext T1.16b, T2.16b, T2.16b, #8 .Lgh3: eor T2.16b, T2.16b, T1.16b pmull2 XH.1q, HH3.2d, T1.2d // a1 * b1 pmull XL.1q, HH3.1d, T1.1d // a0 * b0 pmull XM.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0) eor XH2.16b, XH2.16b, XH.16b eor XL2.16b, XL2.16b, XL.16b eor XM2.16b, XM2.16b, XM.16b ext T2.16b, TT3.16b, TT3.16b, #8 .Lgh2: eor TT3.16b, TT3.16b, T2.16b pmull2 XH.1q, HH.2d, T2.2d // a1 * b1 pmull XL.1q, HH.1d, T2.1d // a0 * b0 pmull2 XM.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0) eor XH2.16b, XH2.16b, XH.16b eor XL2.16b, XL2.16b, XL.16b eor XM2.16b, XM2.16b, XM.16b ext IN1.16b, TT4.16b, TT4.16b, #8 .Lgh1: eor TT4.16b, TT4.16b, IN1.16b pmull XL.1q, SHASH.1d, IN1.1d // a0 * b0 pmull2 XH.1q, SHASH.2d, IN1.2d // a1 * b1 pmull XM.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0) eor XH.16b, XH.16b, XH2.16b eor XL.16b, XL.16b, XL2.16b eor XM.16b, XM.16b, XM2.16b eor T2.16b, XL.16b, XH.16b ext T1.16b, XL.16b, XH.16b, #8 eor XM.16b, XM.16b, T2.16b __pmull_reduce_p64 eor T2.16b, T2.16b, XH.16b eor XL.16b, XL.16b, T2.16b ret SYM_FUNC_END(pmull_gcm_ghash_4x) SYM_FUNC_START_LOCAL(pmull_gcm_enc_4x) ld1 {KS0.16b}, [x5] // load upper counter sub w10, w8, #4 sub w11, w8, #3 sub w12, w8, #2 sub w13, w8, #1 rev w10, w10 rev w11, w11 rev w12, w12 rev w13, w13 mov KS1.16b, KS0.16b mov KS2.16b, KS0.16b mov KS3.16b, KS0.16b ins KS0.s[3], w10 // set lower counter ins KS1.s[3], w11 ins KS2.s[3], w12 ins KS3.s[3], w13 add x10, x6, #96 // round key pointer ld1 {K6.4s-K7.4s}, [x10], #32 .irp key, K0, K1, K2, K3, K4, K5 enc_qround KS0, KS1, KS2, KS3, \key .endr tbnz x7, #2, .Lnot128 .subsection 1 .Lnot128: ld1 {K8.4s-K9.4s}, [x10], #32 .irp key, K6, K7 enc_qround KS0, KS1, KS2, KS3, \key .endr ld1 {K6.4s-K7.4s}, [x10] .irp key, K8, K9 enc_qround KS0, KS1, KS2, KS3, \key .endr tbz x7, #1, .Lout192 b .Lout256 .previous .Lout256: .irp key, K6, K7 enc_qround KS0, KS1, KS2, KS3, \key .endr .Lout192: enc_qround KS0, KS1, KS2, KS3, KK aese KS0.16b, KL.16b aese KS1.16b, KL.16b aese KS2.16b, KL.16b aese KS3.16b, KL.16b eor KS0.16b, KS0.16b, KM.16b eor KS1.16b, KS1.16b, KM.16b eor KS2.16b, KS2.16b, KM.16b eor KS3.16b, KS3.16b, KM.16b eor INP0.16b, INP0.16b, KS0.16b eor INP1.16b, INP1.16b, KS1.16b eor INP2.16b, INP2.16b, KS2.16b eor INP3.16b, INP3.16b, KS3.16b ret SYM_FUNC_END(pmull_gcm_enc_4x) .section ".rodata", "a" .align 6 .Lpermute_table: .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf .previous
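/*
 * Reference semantics for pmull_ghash_update_p64/_p8 above, as a C sketch.
 * gf128_mul_ghash() is an assumed helper that XORs one 16-byte block into
 * the digest and multiplies by H in GF(2^128); it is not a real kernel API,
 * and the parameter list is simplified relative to the prototype above:
 *
 *	static void ghash_update_ref(u8 dg[16], const u8 *src, int blocks,
 *				     const u8 h[16], const u8 *head)
 *	{
 *		if (head)				// head block first
 *			gf128_mul_ghash(dg, head, h);
 *		while (blocks--) {
 *			gf128_mul_ghash(dg, src, h);	// dg = (dg ^ *src) * h
 *			src += 16;
 *		}
 *	}
 *
 * The 4-way path in __pmull_ghash computes the same result by folding four
 * blocks against H^4..H^1 (HH4, HH3, HH, SHASH) before a single reduction.
 */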
aixcc-public/challenge-001-exemplar-source
22,211
arch/arm64/crypto/aes-neonbs-core.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Bit sliced AES using NEON instructions * * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> */ /* * The algorithm implemented here is described in detail by the paper * 'Faster and Timing-Attack Resistant AES-GCM' by Emilia Kaesper and * Peter Schwabe (https://eprint.iacr.org/2009/129.pdf) * * This implementation is based primarily on the OpenSSL implementation * for 32-bit ARM written by Andy Polyakov <appro@openssl.org> */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/assembler.h> .text rounds .req x11 bskey .req x12 .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 eor \b2, \b2, \b1 eor \b5, \b5, \b6 eor \b3, \b3, \b0 eor \b6, \b6, \b2 eor \b5, \b5, \b0 eor \b6, \b6, \b3 eor \b3, \b3, \b7 eor \b7, \b7, \b5 eor \b3, \b3, \b4 eor \b4, \b4, \b5 eor \b2, \b2, \b7 eor \b3, \b3, \b1 eor \b1, \b1, \b5 .endm .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 eor \b0, \b0, \b6 eor \b1, \b1, \b4 eor \b4, \b4, \b6 eor \b2, \b2, \b0 eor \b6, \b6, \b1 eor \b1, \b1, \b5 eor \b5, \b5, \b3 eor \b3, \b3, \b7 eor \b7, \b7, \b5 eor \b2, \b2, \b5 eor \b4, \b4, \b7 .endm .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5 eor \b1, \b1, \b7 eor \b4, \b4, \b7 eor \b7, \b7, \b5 eor \b1, \b1, \b3 eor \b2, \b2, \b5 eor \b3, \b3, \b7 eor \b6, \b6, \b1 eor \b2, \b2, \b0 eor \b5, \b5, \b3 eor \b4, \b4, \b6 eor \b0, \b0, \b6 eor \b1, \b1, \b4 .endm .macro inv_out_bs_ch, b6, b5, b0, b3, b7, b1, b4, b2 eor \b1, \b1, \b5 eor \b2, \b2, \b7 eor \b3, \b3, \b1 eor \b4, \b4, \b5 eor \b7, \b7, \b5 eor \b3, \b3, \b4 eor \b5, \b5, \b0 eor \b3, \b3, \b7 eor \b6, \b6, \b2 eor \b2, \b2, \b1 eor \b6, \b6, \b3 eor \b3, \b3, \b0 eor \b5, \b5, \b6 .endm .macro mul_gf4, x0, x1, y0, y1, t0, t1 eor \t0, \y0, \y1 and \t0, \t0, \x0 eor \x0, \x0, \x1 and \t1, \x1, \y0 and \x0, \x0, \y1 eor \x1, \t1, \t0 eor \x0, \x0, \t1 .endm .macro mul_gf4_n_gf4, x0, x1, y0, y1, t0, x2, x3, y2, y3, t1 eor \t0, \y0, \y1 eor \t1, \y2, \y3 and \t0, \t0, \x0 and \t1, \t1, \x2 eor \x0, \x0, \x1 eor \x2, \x2, \x3 and \x1, \x1, \y0 and \x3, \x3, \y2 and \x0, \x0, \y1 and \x2, \x2, \y3 eor \x1, \x1, \x0 eor \x2, \x2, \x3 eor \x0, \x0, \t0 eor \x3, \x3, \t1 .endm .macro mul_gf16_2, x0, x1, x2, x3, x4, x5, x6, x7, \ y0, y1, y2, y3, t0, t1, t2, t3 eor \t0, \x0, \x2 eor \t1, \x1, \x3 mul_gf4 \x0, \x1, \y0, \y1, \t2, \t3 eor \y0, \y0, \y2 eor \y1, \y1, \y3 mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x2, \x3, \y2, \y3, \t2 eor \x0, \x0, \t0 eor \x2, \x2, \t0 eor \x1, \x1, \t1 eor \x3, \x3, \t1 eor \t0, \x4, \x6 eor \t1, \x5, \x7 mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2 eor \y0, \y0, \y2 eor \y1, \y1, \y3 mul_gf4 \x4, \x5, \y0, \y1, \t2, \t3 eor \x4, \x4, \t0 eor \x6, \x6, \t0 eor \x5, \x5, \t1 eor \x7, \x7, \t1 .endm .macro inv_gf256, x0, x1, x2, x3, x4, x5, x6, x7, \ t0, t1, t2, t3, s0, s1, s2, s3 eor \t3, \x4, \x6 eor \t0, \x5, \x7 eor \t1, \x1, \x3 eor \s1, \x7, \x6 eor \s0, \x0, \x2 eor \s3, \t3, \t0 orr \t2, \t0, \t1 and \s2, \t3, \s0 orr \t3, \t3, \s0 eor \s0, \s0, \t1 and \t0, \t0, \t1 eor \t1, \x3, \x2 and \s3, \s3, \s0 and \s1, \s1, \t1 eor \t1, \x4, \x5 eor \s0, \x1, \x0 eor \t3, \t3, \s1 eor \t2, \t2, \s1 and \s1, \t1, \s0 orr \t1, \t1, \s0 eor \t3, \t3, \s3 eor \t0, \t0, \s1 eor \t2, \t2, \s2 eor \t1, \t1, \s3 eor \t0, \t0, \s2 and \s0, \x7, \x3 eor \t1, \t1, \s2 and \s1, \x6, \x2 and \s2, \x5, \x1 orr \s3, \x4, \x0 eor \t3, \t3, \s0 eor \t1, \t1, \s2 eor \s0, \t0, \s3 eor \t2, \t2, \s1 and \s2, \t3, \t1 eor \s1, \t2, \s2 eor \s3, \s0, \s2 bsl \s1, \t1, \s0 not 
\t0, \s0 bsl \s0, \s1, \s3 bsl \t0, \s1, \s3 bsl \s3, \t3, \t2 eor \t3, \t3, \t2 and \s2, \s0, \s3 eor \t1, \t1, \t0 eor \s2, \s2, \t3 mul_gf16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \ \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3 .endm .macro sbox, b0, b1, b2, b3, b4, b5, b6, b7, \ t0, t1, t2, t3, s0, s1, s2, s3 in_bs_ch \b0\().16b, \b1\().16b, \b2\().16b, \b3\().16b, \ \b4\().16b, \b5\().16b, \b6\().16b, \b7\().16b inv_gf256 \b6\().16b, \b5\().16b, \b0\().16b, \b3\().16b, \ \b7\().16b, \b1\().16b, \b4\().16b, \b2\().16b, \ \t0\().16b, \t1\().16b, \t2\().16b, \t3\().16b, \ \s0\().16b, \s1\().16b, \s2\().16b, \s3\().16b out_bs_ch \b7\().16b, \b1\().16b, \b4\().16b, \b2\().16b, \ \b6\().16b, \b5\().16b, \b0\().16b, \b3\().16b .endm .macro inv_sbox, b0, b1, b2, b3, b4, b5, b6, b7, \ t0, t1, t2, t3, s0, s1, s2, s3 inv_in_bs_ch \b0\().16b, \b1\().16b, \b2\().16b, \b3\().16b, \ \b4\().16b, \b5\().16b, \b6\().16b, \b7\().16b inv_gf256 \b5\().16b, \b1\().16b, \b2\().16b, \b6\().16b, \ \b3\().16b, \b7\().16b, \b0\().16b, \b4\().16b, \ \t0\().16b, \t1\().16b, \t2\().16b, \t3\().16b, \ \s0\().16b, \s1\().16b, \s2\().16b, \s3\().16b inv_out_bs_ch \b3\().16b, \b7\().16b, \b0\().16b, \b4\().16b, \ \b5\().16b, \b1\().16b, \b2\().16b, \b6\().16b .endm .macro enc_next_rk ldp q16, q17, [bskey], #128 ldp q18, q19, [bskey, #-96] ldp q20, q21, [bskey, #-64] ldp q22, q23, [bskey, #-32] .endm .macro dec_next_rk ldp q16, q17, [bskey, #-128]! ldp q18, q19, [bskey, #32] ldp q20, q21, [bskey, #64] ldp q22, q23, [bskey, #96] .endm .macro add_round_key, x0, x1, x2, x3, x4, x5, x6, x7 eor \x0\().16b, \x0\().16b, v16.16b eor \x1\().16b, \x1\().16b, v17.16b eor \x2\().16b, \x2\().16b, v18.16b eor \x3\().16b, \x3\().16b, v19.16b eor \x4\().16b, \x4\().16b, v20.16b eor \x5\().16b, \x5\().16b, v21.16b eor \x6\().16b, \x6\().16b, v22.16b eor \x7\().16b, \x7\().16b, v23.16b .endm .macro shift_rows, x0, x1, x2, x3, x4, x5, x6, x7, mask tbl \x0\().16b, {\x0\().16b}, \mask\().16b tbl \x1\().16b, {\x1\().16b}, \mask\().16b tbl \x2\().16b, {\x2\().16b}, \mask\().16b tbl \x3\().16b, {\x3\().16b}, \mask\().16b tbl \x4\().16b, {\x4\().16b}, \mask\().16b tbl \x5\().16b, {\x5\().16b}, \mask\().16b tbl \x6\().16b, {\x6\().16b}, \mask\().16b tbl \x7\().16b, {\x7\().16b}, \mask\().16b .endm .macro mix_cols, x0, x1, x2, x3, x4, x5, x6, x7, \ t0, t1, t2, t3, t4, t5, t6, t7, inv ext \t0\().16b, \x0\().16b, \x0\().16b, #12 ext \t1\().16b, \x1\().16b, \x1\().16b, #12 eor \x0\().16b, \x0\().16b, \t0\().16b ext \t2\().16b, \x2\().16b, \x2\().16b, #12 eor \x1\().16b, \x1\().16b, \t1\().16b ext \t3\().16b, \x3\().16b, \x3\().16b, #12 eor \x2\().16b, \x2\().16b, \t2\().16b ext \t4\().16b, \x4\().16b, \x4\().16b, #12 eor \x3\().16b, \x3\().16b, \t3\().16b ext \t5\().16b, \x5\().16b, \x5\().16b, #12 eor \x4\().16b, \x4\().16b, \t4\().16b ext \t6\().16b, \x6\().16b, \x6\().16b, #12 eor \x5\().16b, \x5\().16b, \t5\().16b ext \t7\().16b, \x7\().16b, \x7\().16b, #12 eor \x6\().16b, \x6\().16b, \t6\().16b eor \t1\().16b, \t1\().16b, \x0\().16b eor \x7\().16b, \x7\().16b, \t7\().16b ext \x0\().16b, \x0\().16b, \x0\().16b, #8 eor \t2\().16b, \t2\().16b, \x1\().16b eor \t0\().16b, \t0\().16b, \x7\().16b eor \t1\().16b, \t1\().16b, \x7\().16b ext \x1\().16b, \x1\().16b, \x1\().16b, #8 eor \t5\().16b, \t5\().16b, \x4\().16b eor \x0\().16b, \x0\().16b, \t0\().16b eor \t6\().16b, \t6\().16b, \x5\().16b eor \x1\().16b, \x1\().16b, \t1\().16b ext \t0\().16b, \x4\().16b, \x4\().16b, #8 eor \t4\().16b, \t4\().16b, \x3\().16b ext \t1\().16b, \x5\().16b, \x5\().16b, #8 
eor \t7\().16b, \t7\().16b, \x6\().16b ext \x4\().16b, \x3\().16b, \x3\().16b, #8 eor \t3\().16b, \t3\().16b, \x2\().16b ext \x5\().16b, \x7\().16b, \x7\().16b, #8 eor \t4\().16b, \t4\().16b, \x7\().16b ext \x3\().16b, \x6\().16b, \x6\().16b, #8 eor \t3\().16b, \t3\().16b, \x7\().16b ext \x6\().16b, \x2\().16b, \x2\().16b, #8 eor \x7\().16b, \t1\().16b, \t5\().16b .ifb \inv eor \x2\().16b, \t0\().16b, \t4\().16b eor \x4\().16b, \x4\().16b, \t3\().16b eor \x5\().16b, \x5\().16b, \t7\().16b eor \x3\().16b, \x3\().16b, \t6\().16b eor \x6\().16b, \x6\().16b, \t2\().16b .else eor \t3\().16b, \t3\().16b, \x4\().16b eor \x5\().16b, \x5\().16b, \t7\().16b eor \x2\().16b, \x3\().16b, \t6\().16b eor \x3\().16b, \t0\().16b, \t4\().16b eor \x4\().16b, \x6\().16b, \t2\().16b mov \x6\().16b, \t3\().16b .endif .endm .macro inv_mix_cols, x0, x1, x2, x3, x4, x5, x6, x7, \ t0, t1, t2, t3, t4, t5, t6, t7 ext \t0\().16b, \x0\().16b, \x0\().16b, #8 ext \t6\().16b, \x6\().16b, \x6\().16b, #8 ext \t7\().16b, \x7\().16b, \x7\().16b, #8 eor \t0\().16b, \t0\().16b, \x0\().16b ext \t1\().16b, \x1\().16b, \x1\().16b, #8 eor \t6\().16b, \t6\().16b, \x6\().16b ext \t2\().16b, \x2\().16b, \x2\().16b, #8 eor \t7\().16b, \t7\().16b, \x7\().16b ext \t3\().16b, \x3\().16b, \x3\().16b, #8 eor \t1\().16b, \t1\().16b, \x1\().16b ext \t4\().16b, \x4\().16b, \x4\().16b, #8 eor \t2\().16b, \t2\().16b, \x2\().16b ext \t5\().16b, \x5\().16b, \x5\().16b, #8 eor \t3\().16b, \t3\().16b, \x3\().16b eor \t4\().16b, \t4\().16b, \x4\().16b eor \t5\().16b, \t5\().16b, \x5\().16b eor \x0\().16b, \x0\().16b, \t6\().16b eor \x1\().16b, \x1\().16b, \t6\().16b eor \x2\().16b, \x2\().16b, \t0\().16b eor \x4\().16b, \x4\().16b, \t2\().16b eor \x3\().16b, \x3\().16b, \t1\().16b eor \x1\().16b, \x1\().16b, \t7\().16b eor \x2\().16b, \x2\().16b, \t7\().16b eor \x4\().16b, \x4\().16b, \t6\().16b eor \x5\().16b, \x5\().16b, \t3\().16b eor \x3\().16b, \x3\().16b, \t6\().16b eor \x6\().16b, \x6\().16b, \t4\().16b eor \x4\().16b, \x4\().16b, \t7\().16b eor \x5\().16b, \x5\().16b, \t7\().16b eor \x7\().16b, \x7\().16b, \t5\().16b mix_cols \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \ \t0, \t1, \t2, \t3, \t4, \t5, \t6, \t7, 1 .endm .macro swapmove_2x, a0, b0, a1, b1, n, mask, t0, t1 ushr \t0\().2d, \b0\().2d, #\n ushr \t1\().2d, \b1\().2d, #\n eor \t0\().16b, \t0\().16b, \a0\().16b eor \t1\().16b, \t1\().16b, \a1\().16b and \t0\().16b, \t0\().16b, \mask\().16b and \t1\().16b, \t1\().16b, \mask\().16b eor \a0\().16b, \a0\().16b, \t0\().16b shl \t0\().2d, \t0\().2d, #\n eor \a1\().16b, \a1\().16b, \t1\().16b shl \t1\().2d, \t1\().2d, #\n eor \b0\().16b, \b0\().16b, \t0\().16b eor \b1\().16b, \b1\().16b, \t1\().16b .endm .macro bitslice, x7, x6, x5, x4, x3, x2, x1, x0, t0, t1, t2, t3 movi \t0\().16b, #0x55 movi \t1\().16b, #0x33 swapmove_2x \x0, \x1, \x2, \x3, 1, \t0, \t2, \t3 swapmove_2x \x4, \x5, \x6, \x7, 1, \t0, \t2, \t3 movi \t0\().16b, #0x0f swapmove_2x \x0, \x2, \x1, \x3, 2, \t1, \t2, \t3 swapmove_2x \x4, \x6, \x5, \x7, 2, \t1, \t2, \t3 swapmove_2x \x0, \x4, \x1, \x5, 4, \t0, \t2, \t3 swapmove_2x \x2, \x6, \x3, \x7, 4, \t0, \t2, \t3 .endm .align 6 M0: .octa 0x0004080c0105090d02060a0e03070b0f M0SR: .octa 0x0004080c05090d010a0e02060f03070b SR: .octa 0x0f0e0d0c0a09080b0504070600030201 SRM0: .octa 0x01060b0c0207080d0304090e00050a0f M0ISR: .octa 0x0004080c0d0105090a0e0206070b0f03 ISR: .octa 0x0f0e0d0c080b0a090504070602010003 ISRM0: .octa 0x0306090c00070a0d01040b0e0205080f /* * void aesbs_convert_key(u8 out[], u32 const rk[], int rounds) */ 
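/*
 * The cmtst sequence in the key loop below moves each (M0-permuted) round
 * key into bit-sliced form: plane i holds, in every byte lane, 0xff iff
 * bit i of the corresponding key byte is set; planes 0, 1, 5 and 6 are
 * then inverted by the not instructions, an adjustment required by the
 * bit-sliced S-box used in aesbs_encrypt8/aesbs_decrypt8. A C sketch of
 * the extraction, ignoring the M0 byte permutation and for illustration
 * only:
 *
 *	for (int i = 0; i < 8; i++)
 *		for (int j = 0; j < 16; j++)
 *			plane[i][j] = (key[j] & (1 << i)) ? 0xff : 0x00;
 */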
SYM_FUNC_START(aesbs_convert_key) ld1 {v7.4s}, [x1], #16 // load round 0 key ld1 {v17.4s}, [x1], #16 // load round 1 key movi v8.16b, #0x01 // bit masks movi v9.16b, #0x02 movi v10.16b, #0x04 movi v11.16b, #0x08 movi v12.16b, #0x10 movi v13.16b, #0x20 movi v14.16b, #0x40 movi v15.16b, #0x80 ldr q16, M0 sub x2, x2, #1 str q7, [x0], #16 // save round 0 key .Lkey_loop: tbl v7.16b ,{v17.16b}, v16.16b ld1 {v17.4s}, [x1], #16 // load next round key cmtst v0.16b, v7.16b, v8.16b cmtst v1.16b, v7.16b, v9.16b cmtst v2.16b, v7.16b, v10.16b cmtst v3.16b, v7.16b, v11.16b cmtst v4.16b, v7.16b, v12.16b cmtst v5.16b, v7.16b, v13.16b cmtst v6.16b, v7.16b, v14.16b cmtst v7.16b, v7.16b, v15.16b not v0.16b, v0.16b not v1.16b, v1.16b not v5.16b, v5.16b not v6.16b, v6.16b subs x2, x2, #1 stp q0, q1, [x0], #128 stp q2, q3, [x0, #-96] stp q4, q5, [x0, #-64] stp q6, q7, [x0, #-32] b.ne .Lkey_loop movi v7.16b, #0x63 // compose .L63 eor v17.16b, v17.16b, v7.16b str q17, [x0] ret SYM_FUNC_END(aesbs_convert_key) .align 4 SYM_FUNC_START_LOCAL(aesbs_encrypt8) ldr q9, [bskey], #16 // round 0 key ldr q8, M0SR ldr q24, SR eor v10.16b, v0.16b, v9.16b // xor with round0 key eor v11.16b, v1.16b, v9.16b tbl v0.16b, {v10.16b}, v8.16b eor v12.16b, v2.16b, v9.16b tbl v1.16b, {v11.16b}, v8.16b eor v13.16b, v3.16b, v9.16b tbl v2.16b, {v12.16b}, v8.16b eor v14.16b, v4.16b, v9.16b tbl v3.16b, {v13.16b}, v8.16b eor v15.16b, v5.16b, v9.16b tbl v4.16b, {v14.16b}, v8.16b eor v10.16b, v6.16b, v9.16b tbl v5.16b, {v15.16b}, v8.16b eor v11.16b, v7.16b, v9.16b tbl v6.16b, {v10.16b}, v8.16b tbl v7.16b, {v11.16b}, v8.16b bitslice v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 sub rounds, rounds, #1 b .Lenc_sbox .Lenc_loop: shift_rows v0, v1, v2, v3, v4, v5, v6, v7, v24 .Lenc_sbox: sbox v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, \ v13, v14, v15 subs rounds, rounds, #1 b.cc .Lenc_done enc_next_rk mix_cols v0, v1, v4, v6, v3, v7, v2, v5, v8, v9, v10, v11, v12, \ v13, v14, v15 add_round_key v0, v1, v2, v3, v4, v5, v6, v7 b.ne .Lenc_loop ldr q24, SRM0 b .Lenc_loop .Lenc_done: ldr q12, [bskey] // last round key bitslice v0, v1, v4, v6, v3, v7, v2, v5, v8, v9, v10, v11 eor v0.16b, v0.16b, v12.16b eor v1.16b, v1.16b, v12.16b eor v4.16b, v4.16b, v12.16b eor v6.16b, v6.16b, v12.16b eor v3.16b, v3.16b, v12.16b eor v7.16b, v7.16b, v12.16b eor v2.16b, v2.16b, v12.16b eor v5.16b, v5.16b, v12.16b ret SYM_FUNC_END(aesbs_encrypt8) .align 4 SYM_FUNC_START_LOCAL(aesbs_decrypt8) lsl x9, rounds, #7 add bskey, bskey, x9 ldr q9, [bskey, #-112]! 
// round 0 key ldr q8, M0ISR ldr q24, ISR eor v10.16b, v0.16b, v9.16b // xor with round0 key eor v11.16b, v1.16b, v9.16b tbl v0.16b, {v10.16b}, v8.16b eor v12.16b, v2.16b, v9.16b tbl v1.16b, {v11.16b}, v8.16b eor v13.16b, v3.16b, v9.16b tbl v2.16b, {v12.16b}, v8.16b eor v14.16b, v4.16b, v9.16b tbl v3.16b, {v13.16b}, v8.16b eor v15.16b, v5.16b, v9.16b tbl v4.16b, {v14.16b}, v8.16b eor v10.16b, v6.16b, v9.16b tbl v5.16b, {v15.16b}, v8.16b eor v11.16b, v7.16b, v9.16b tbl v6.16b, {v10.16b}, v8.16b tbl v7.16b, {v11.16b}, v8.16b bitslice v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 sub rounds, rounds, #1 b .Ldec_sbox .Ldec_loop: shift_rows v0, v1, v2, v3, v4, v5, v6, v7, v24 .Ldec_sbox: inv_sbox v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, \ v13, v14, v15 subs rounds, rounds, #1 b.cc .Ldec_done dec_next_rk add_round_key v0, v1, v6, v4, v2, v7, v3, v5 inv_mix_cols v0, v1, v6, v4, v2, v7, v3, v5, v8, v9, v10, v11, v12, \ v13, v14, v15 b.ne .Ldec_loop ldr q24, ISRM0 b .Ldec_loop .Ldec_done: ldr q12, [bskey, #-16] // last round key bitslice v0, v1, v6, v4, v2, v7, v3, v5, v8, v9, v10, v11 eor v0.16b, v0.16b, v12.16b eor v1.16b, v1.16b, v12.16b eor v6.16b, v6.16b, v12.16b eor v4.16b, v4.16b, v12.16b eor v2.16b, v2.16b, v12.16b eor v7.16b, v7.16b, v12.16b eor v3.16b, v3.16b, v12.16b eor v5.16b, v5.16b, v12.16b ret SYM_FUNC_END(aesbs_decrypt8) /* * aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks) * aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks) */ .macro __ecb_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7 frame_push 5 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 99: mov x5, #1 lsl x5, x5, x23 subs w23, w23, #8 csel x23, x23, xzr, pl csel x5, x5, xzr, mi ld1 {v0.16b}, [x20], #16 tbnz x5, #1, 0f ld1 {v1.16b}, [x20], #16 tbnz x5, #2, 0f ld1 {v2.16b}, [x20], #16 tbnz x5, #3, 0f ld1 {v3.16b}, [x20], #16 tbnz x5, #4, 0f ld1 {v4.16b}, [x20], #16 tbnz x5, #5, 0f ld1 {v5.16b}, [x20], #16 tbnz x5, #6, 0f ld1 {v6.16b}, [x20], #16 tbnz x5, #7, 0f ld1 {v7.16b}, [x20], #16 0: mov bskey, x21 mov rounds, x22 bl \do8 st1 {\o0\().16b}, [x19], #16 tbnz x5, #1, 1f st1 {\o1\().16b}, [x19], #16 tbnz x5, #2, 1f st1 {\o2\().16b}, [x19], #16 tbnz x5, #3, 1f st1 {\o3\().16b}, [x19], #16 tbnz x5, #4, 1f st1 {\o4\().16b}, [x19], #16 tbnz x5, #5, 1f st1 {\o5\().16b}, [x19], #16 tbnz x5, #6, 1f st1 {\o6\().16b}, [x19], #16 tbnz x5, #7, 1f st1 {\o7\().16b}, [x19], #16 cbz x23, 1f b 99b 1: frame_pop ret .endm .align 4 SYM_TYPED_FUNC_START(aesbs_ecb_encrypt) __ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5 SYM_FUNC_END(aesbs_ecb_encrypt) .align 4 SYM_TYPED_FUNC_START(aesbs_ecb_decrypt) __ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5 SYM_FUNC_END(aesbs_ecb_decrypt) /* * aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 iv[]) */ .align 4 SYM_FUNC_START(aesbs_cbc_decrypt) frame_push 6 mov x19, x0 mov x20, x1 mov x21, x2 mov x22, x3 mov x23, x4 mov x24, x5 99: mov x6, #1 lsl x6, x6, x23 subs w23, w23, #8 csel x23, x23, xzr, pl csel x6, x6, xzr, mi ld1 {v0.16b}, [x20], #16 mov v25.16b, v0.16b tbnz x6, #1, 0f ld1 {v1.16b}, [x20], #16 mov v26.16b, v1.16b tbnz x6, #2, 0f ld1 {v2.16b}, [x20], #16 mov v27.16b, v2.16b tbnz x6, #3, 0f ld1 {v3.16b}, [x20], #16 mov v28.16b, v3.16b tbnz x6, #4, 0f ld1 {v4.16b}, [x20], #16 mov v29.16b, v4.16b tbnz x6, #5, 0f ld1 {v5.16b}, [x20], #16 mov v30.16b, v5.16b tbnz x6, #6, 0f ld1 {v6.16b}, [x20], #16 mov v31.16b, v6.16b tbnz x6, #7, 0f ld1 {v7.16b}, [x20] 
0: mov bskey, x21 mov rounds, x22 bl aesbs_decrypt8 ld1 {v24.16b}, [x24] // load IV eor v1.16b, v1.16b, v25.16b eor v6.16b, v6.16b, v26.16b eor v4.16b, v4.16b, v27.16b eor v2.16b, v2.16b, v28.16b eor v7.16b, v7.16b, v29.16b eor v0.16b, v0.16b, v24.16b eor v3.16b, v3.16b, v30.16b eor v5.16b, v5.16b, v31.16b st1 {v0.16b}, [x19], #16 mov v24.16b, v25.16b tbnz x6, #1, 1f st1 {v1.16b}, [x19], #16 mov v24.16b, v26.16b tbnz x6, #2, 1f st1 {v6.16b}, [x19], #16 mov v24.16b, v27.16b tbnz x6, #3, 1f st1 {v4.16b}, [x19], #16 mov v24.16b, v28.16b tbnz x6, #4, 1f st1 {v2.16b}, [x19], #16 mov v24.16b, v29.16b tbnz x6, #5, 1f st1 {v7.16b}, [x19], #16 mov v24.16b, v30.16b tbnz x6, #6, 1f st1 {v3.16b}, [x19], #16 mov v24.16b, v31.16b tbnz x6, #7, 1f ld1 {v24.16b}, [x20], #16 st1 {v5.16b}, [x19], #16 1: st1 {v24.16b}, [x24] // store IV cbz x23, 2f b 99b 2: frame_pop ret SYM_FUNC_END(aesbs_cbc_decrypt) .macro next_tweak, out, in, const, tmp sshr \tmp\().2d, \in\().2d, #63 and \tmp\().16b, \tmp\().16b, \const\().16b add \out\().2d, \in\().2d, \in\().2d ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 eor \out\().16b, \out\().16b, \tmp\().16b .endm /* * aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 iv[]) * aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 iv[]) */ SYM_FUNC_START_LOCAL(__xts_crypt8) movi v18.2s, #0x1 movi v19.2s, #0x87 uzp1 v18.4s, v18.4s, v19.4s ld1 {v0.16b-v3.16b}, [x1], #64 ld1 {v4.16b-v7.16b}, [x1], #64 next_tweak v26, v25, v18, v19 next_tweak v27, v26, v18, v19 next_tweak v28, v27, v18, v19 next_tweak v29, v28, v18, v19 next_tweak v30, v29, v18, v19 next_tweak v31, v30, v18, v19 next_tweak v16, v31, v18, v19 next_tweak v17, v16, v18, v19 eor v0.16b, v0.16b, v25.16b eor v1.16b, v1.16b, v26.16b eor v2.16b, v2.16b, v27.16b eor v3.16b, v3.16b, v28.16b eor v4.16b, v4.16b, v29.16b eor v5.16b, v5.16b, v30.16b eor v6.16b, v6.16b, v31.16b eor v7.16b, v7.16b, v16.16b stp q16, q17, [sp, #16] mov bskey, x2 mov rounds, x3 br x16 SYM_FUNC_END(__xts_crypt8) .macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7 stp x29, x30, [sp, #-48]! mov x29, sp ld1 {v25.16b}, [x5] 0: adr x16, \do8 bl __xts_crypt8 eor v16.16b, \o0\().16b, v25.16b eor v17.16b, \o1\().16b, v26.16b eor v18.16b, \o2\().16b, v27.16b eor v19.16b, \o3\().16b, v28.16b ldp q24, q25, [sp, #16] eor v20.16b, \o4\().16b, v29.16b eor v21.16b, \o5\().16b, v30.16b eor v22.16b, \o6\().16b, v31.16b eor v23.16b, \o7\().16b, v24.16b st1 {v16.16b-v19.16b}, [x0], #64 st1 {v20.16b-v23.16b}, [x0], #64 subs x4, x4, #8 b.gt 0b st1 {v25.16b}, [x5] ldp x29, x30, [sp], #48 ret .endm SYM_TYPED_FUNC_START(aesbs_xts_encrypt) __xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5 SYM_FUNC_END(aesbs_xts_encrypt) SYM_TYPED_FUNC_START(aesbs_xts_decrypt) __xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5 SYM_FUNC_END(aesbs_xts_decrypt) .macro next_ctr, v mov \v\().d[1], x8 adds x8, x8, #1 mov \v\().d[0], x7 adc x7, x7, xzr rev64 \v\().16b, \v\().16b .endm /* * aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], * int rounds, int blocks, u8 iv[]) */ SYM_FUNC_START(aesbs_ctr_encrypt) stp x29, x30, [sp, #-16]! 
mov x29, sp ldp x7, x8, [x5] ld1 {v0.16b}, [x5] CPU_LE( rev x7, x7 ) CPU_LE( rev x8, x8 ) adds x8, x8, #1 adc x7, x7, xzr 0: next_ctr v1 next_ctr v2 next_ctr v3 next_ctr v4 next_ctr v5 next_ctr v6 next_ctr v7 mov bskey, x2 mov rounds, x3 bl aesbs_encrypt8 ld1 { v8.16b-v11.16b}, [x1], #64 ld1 {v12.16b-v15.16b}, [x1], #64 eor v8.16b, v0.16b, v8.16b eor v9.16b, v1.16b, v9.16b eor v10.16b, v4.16b, v10.16b eor v11.16b, v6.16b, v11.16b eor v12.16b, v3.16b, v12.16b eor v13.16b, v7.16b, v13.16b eor v14.16b, v2.16b, v14.16b eor v15.16b, v5.16b, v15.16b st1 { v8.16b-v11.16b}, [x0], #64 st1 {v12.16b-v15.16b}, [x0], #64 next_ctr v0 subs x4, x4, #8 b.gt 0b st1 {v0.16b}, [x5] ldp x29, x30, [sp], #16 ret SYM_FUNC_END(aesbs_ctr_encrypt)
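/*
 * For reference, a minimal C sketch of what the next_ctr macro above does:
 * the IV is treated as a 128-bit big-endian counter kept in two host-order
 * 64-bit halves (x7:x8 in the assembly) and incremented with carry between
 * blocks. The helper name ctr_iv_increment is invented for illustration and
 * is not part of the kernel API.
 */
#include <stdint.h>

static void ctr_iv_increment(uint8_t iv[16], uint64_t *hi, uint64_t *lo)
{
	/* bump the low half; propagate the carry into the high half */
	if (++(*lo) == 0)
		++(*hi);

	/* store back as one 128-bit big-endian value, as rev64 does */
	for (int i = 0; i < 8; i++) {
		iv[i]     = (uint8_t)(*hi >> (56 - 8 * i));
		iv[8 + i] = (uint8_t)(*lo >> (56 - 8 * i));
	}
}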
aixcc-public/challenge-001-exemplar-source
7,624
arch/arm64/crypto/aes-neon.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm64/crypto/aes-neon.S - AES cipher for ARMv8 NEON * * Copyright (C) 2013 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org> */ #include <linux/linkage.h> #include <asm/assembler.h> #define AES_FUNC_START(func) SYM_FUNC_START(neon_ ## func) #define AES_FUNC_END(func) SYM_FUNC_END(neon_ ## func) xtsmask .req v7 cbciv .req v7 vctr .req v4 .macro xts_reload_mask, tmp xts_load_mask \tmp .endm /* special case for the neon-bs driver calling into this one for CTS */ .macro xts_cts_skip_tw, reg, lbl tbnz \reg, #1, \lbl .endm /* multiply by polynomial 'x' in GF(2^8) */ .macro mul_by_x, out, in, temp, const sshr \temp, \in, #7 shl \out, \in, #1 and \temp, \temp, \const eor \out, \out, \temp .endm /* multiply by polynomial 'x^2' in GF(2^8) */ .macro mul_by_x2, out, in, temp, const ushr \temp, \in, #6 shl \out, \in, #2 pmul \temp, \temp, \const eor \out, \out, \temp .endm /* preload the entire Sbox */ .macro prepare, sbox, shiftrows, temp movi v12.16b, #0x1b ldr_l q13, \shiftrows, \temp ldr_l q14, .Lror32by8, \temp adr_l \temp, \sbox ld1 {v16.16b-v19.16b}, [\temp], #64 ld1 {v20.16b-v23.16b}, [\temp], #64 ld1 {v24.16b-v27.16b}, [\temp], #64 ld1 {v28.16b-v31.16b}, [\temp] .endm /* do preload for encryption */ .macro enc_prepare, ignore0, ignore1, temp prepare crypto_aes_sbox, .LForward_ShiftRows, \temp .endm .macro enc_switch_key, ignore0, ignore1, temp /* do nothing */ .endm /* do preload for decryption */ .macro dec_prepare, ignore0, ignore1, temp prepare crypto_aes_inv_sbox, .LReverse_ShiftRows, \temp .endm /* apply SubBytes transformation using the preloaded Sbox */ .macro sub_bytes, in sub v9.16b, \in\().16b, v15.16b tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b sub v10.16b, v9.16b, v15.16b tbx \in\().16b, {v20.16b-v23.16b}, v9.16b sub v11.16b, v10.16b, v15.16b tbx \in\().16b, {v24.16b-v27.16b}, v10.16b tbx \in\().16b, {v28.16b-v31.16b}, v11.16b .endm /* apply MixColumns transformation */ .macro mix_columns, in, enc .if \enc == 0 /* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */ mul_by_x2 v8.16b, \in\().16b, v9.16b, v12.16b eor \in\().16b, \in\().16b, v8.16b rev32 v8.8h, v8.8h eor \in\().16b, \in\().16b, v8.16b .endif mul_by_x v9.16b, \in\().16b, v8.16b, v12.16b rev32 v8.8h, \in\().8h eor v8.16b, v8.16b, v9.16b eor \in\().16b, \in\().16b, v8.16b tbl \in\().16b, {\in\().16b}, v14.16b eor \in\().16b, \in\().16b, v8.16b .endm .macro do_block, enc, in, rounds, rk, rkp, i ld1 {v15.4s}, [\rk] add \rkp, \rk, #16 mov \i, \rounds 1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */ movi v15.16b, #0x40 tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */ sub_bytes \in subs \i, \i, #1 ld1 {v15.4s}, [\rkp], #16 beq 2222f mix_columns \in, \enc b 1111b 2222: eor \in\().16b, \in\().16b, v15.16b /* ^round key */ .endm .macro encrypt_block, in, rounds, rk, rkp, i do_block 1, \in, \rounds, \rk, \rkp, \i .endm .macro decrypt_block, in, rounds, rk, rkp, i do_block 0, \in, \rounds, \rk, \rkp, \i .endm /* * Interleaved versions: functionally equivalent to the * ones above, but applied to AES states in parallel. 
*/ .macro sub_bytes_4x, in0, in1, in2, in3 sub v8.16b, \in0\().16b, v15.16b tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b sub v9.16b, \in1\().16b, v15.16b tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b sub v10.16b, \in2\().16b, v15.16b tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b sub v11.16b, \in3\().16b, v15.16b tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b sub v8.16b, v8.16b, v15.16b tbx \in2\().16b, {v20.16b-v23.16b}, v10.16b sub v9.16b, v9.16b, v15.16b tbx \in3\().16b, {v20.16b-v23.16b}, v11.16b sub v10.16b, v10.16b, v15.16b tbx \in0\().16b, {v24.16b-v27.16b}, v8.16b sub v11.16b, v11.16b, v15.16b tbx \in1\().16b, {v24.16b-v27.16b}, v9.16b sub v8.16b, v8.16b, v15.16b tbx \in2\().16b, {v24.16b-v27.16b}, v10.16b sub v9.16b, v9.16b, v15.16b tbx \in3\().16b, {v24.16b-v27.16b}, v11.16b sub v10.16b, v10.16b, v15.16b tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b sub v11.16b, v11.16b, v15.16b tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b tbx \in2\().16b, {v28.16b-v31.16b}, v10.16b tbx \in3\().16b, {v28.16b-v31.16b}, v11.16b .endm .macro mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const sshr \tmp0\().16b, \in0\().16b, #7 shl \out0\().16b, \in0\().16b, #1 sshr \tmp1\().16b, \in1\().16b, #7 and \tmp0\().16b, \tmp0\().16b, \const\().16b shl \out1\().16b, \in1\().16b, #1 and \tmp1\().16b, \tmp1\().16b, \const\().16b eor \out0\().16b, \out0\().16b, \tmp0\().16b eor \out1\().16b, \out1\().16b, \tmp1\().16b .endm .macro mul_by_x2_2x, out0, out1, in0, in1, tmp0, tmp1, const ushr \tmp0\().16b, \in0\().16b, #6 shl \out0\().16b, \in0\().16b, #2 ushr \tmp1\().16b, \in1\().16b, #6 pmul \tmp0\().16b, \tmp0\().16b, \const\().16b shl \out1\().16b, \in1\().16b, #2 pmul \tmp1\().16b, \tmp1\().16b, \const\().16b eor \out0\().16b, \out0\().16b, \tmp0\().16b eor \out1\().16b, \out1\().16b, \tmp1\().16b .endm .macro mix_columns_2x, in0, in1, enc .if \enc == 0 /* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */ mul_by_x2_2x v8, v9, \in0, \in1, v10, v11, v12 eor \in0\().16b, \in0\().16b, v8.16b rev32 v8.8h, v8.8h eor \in1\().16b, \in1\().16b, v9.16b rev32 v9.8h, v9.8h eor \in0\().16b, \in0\().16b, v8.16b eor \in1\().16b, \in1\().16b, v9.16b .endif mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v12 rev32 v10.8h, \in0\().8h rev32 v11.8h, \in1\().8h eor v10.16b, v10.16b, v8.16b eor v11.16b, v11.16b, v9.16b eor \in0\().16b, \in0\().16b, v10.16b eor \in1\().16b, \in1\().16b, v11.16b tbl \in0\().16b, {\in0\().16b}, v14.16b tbl \in1\().16b, {\in1\().16b}, v14.16b eor \in0\().16b, \in0\().16b, v10.16b eor \in1\().16b, \in1\().16b, v11.16b .endm .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i ld1 {v15.4s}, [\rk] add \rkp, \rk, #16 mov \i, \rounds 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */ movi v15.16b, #0x40 tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */ tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */ tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */ sub_bytes_4x \in0, \in1, \in2, \in3 subs \i, \i, #1 ld1 {v15.4s}, [\rkp], #16 beq 2222f mix_columns_2x \in0, \in1, \enc mix_columns_2x \in2, \in3, \enc b 1111b 2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ eor \in2\().16b, \in2\().16b, v15.16b /* 
							/* ^round key */
	eor		\in3\().16b, \in3\().16b, v15.16b	/* ^round key */
	.endm

	.macro		encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
	do_block_4x	1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
	.endm

	.macro		decrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
	do_block_4x	0, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
	.endm

#include	"aes-modes.S"

	.section	".rodata", "a"
	.align		4
.LForward_ShiftRows:
	.octa		0x0b06010c07020d08030e09040f0a0500

.LReverse_ShiftRows:
	.octa		0x0306090c0f0205080b0e0104070a0d00

.Lror32by8:
	.octa		0x0c0f0e0d080b0a090407060500030201
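/*
 * The mul_by_x macro above is the classic AES "xtime" step. Here is a plain
 * C model of it, assuming the standard AES reduction polynomial 0x11b; the
 * NEON version applies the same operation to 16 bytes at once and derives
 * the 0x1b mask with an arithmetic shift instead of a branch.
 */
#include <stdint.h>

static uint8_t mul_by_x(uint8_t b)
{
	/* fold in 0x1b only when bit 7 was set before the shift */
	uint8_t mask = (b & 0x80) ? 0x1b : 0x00;

	return (uint8_t)(b << 1) ^ mask;
}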
aixcc-public/challenge-001-exemplar-source
3,851
arch/arm64/crypto/sha2-ce-core.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions * * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org> */ #include <linux/linkage.h> #include <asm/assembler.h> .text .arch armv8-a+crypto dga .req q20 dgav .req v20 dgb .req q21 dgbv .req v21 t0 .req v22 t1 .req v23 dg0q .req q24 dg0v .req v24 dg1q .req q25 dg1v .req v25 dg2q .req q26 dg2v .req v26 .macro add_only, ev, rc, s0 mov dg2v.16b, dg0v.16b .ifeq \ev add t1.4s, v\s0\().4s, \rc\().4s sha256h dg0q, dg1q, t0.4s sha256h2 dg1q, dg2q, t0.4s .else .ifnb \s0 add t0.4s, v\s0\().4s, \rc\().4s .endif sha256h dg0q, dg1q, t1.4s sha256h2 dg1q, dg2q, t1.4s .endif .endm .macro add_update, ev, rc, s0, s1, s2, s3 sha256su0 v\s0\().4s, v\s1\().4s add_only \ev, \rc, \s1 sha256su1 v\s0\().4s, v\s2\().4s, v\s3\().4s .endm /* * The SHA-256 round constants */ .section ".rodata", "a" .align 4 .Lsha2_rcon: .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5 .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5 .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3 .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174 .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7 .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967 .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13 .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85 .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3 .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070 .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5 .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3 .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208 .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 /* * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, * int blocks) */ .text SYM_FUNC_START(sha2_ce_transform) /* load round constants */ adr_l x8, .Lsha2_rcon ld1 { v0.4s- v3.4s}, [x8], #64 ld1 { v4.4s- v7.4s}, [x8], #64 ld1 { v8.4s-v11.4s}, [x8], #64 ld1 {v12.4s-v15.4s}, [x8] /* load state */ ld1 {dgav.4s, dgbv.4s}, [x0] /* load sha256_ce_state::finalize */ ldr_l w4, sha256_ce_offsetof_finalize, x4 ldr w4, [x0, x4] /* load input */ 0: ld1 {v16.4s-v19.4s}, [x1], #64 sub w2, w2, #1 CPU_LE( rev32 v16.16b, v16.16b ) CPU_LE( rev32 v17.16b, v17.16b ) CPU_LE( rev32 v18.16b, v18.16b ) CPU_LE( rev32 v19.16b, v19.16b ) 1: add t0.4s, v16.4s, v0.4s mov dg0v.16b, dgav.16b mov dg1v.16b, dgbv.16b add_update 0, v1, 16, 17, 18, 19 add_update 1, v2, 17, 18, 19, 16 add_update 0, v3, 18, 19, 16, 17 add_update 1, v4, 19, 16, 17, 18 add_update 0, v5, 16, 17, 18, 19 add_update 1, v6, 17, 18, 19, 16 add_update 0, v7, 18, 19, 16, 17 add_update 1, v8, 19, 16, 17, 18 add_update 0, v9, 16, 17, 18, 19 add_update 1, v10, 17, 18, 19, 16 add_update 0, v11, 18, 19, 16, 17 add_update 1, v12, 19, 16, 17, 18 add_only 0, v13, 17 add_only 1, v14, 18 add_only 0, v15, 19 add_only 1 /* update state */ add dgav.4s, dgav.4s, dg0v.4s add dgbv.4s, dgbv.4s, dg1v.4s /* handled all input blocks? */ cbz w2, 2f cond_yield 3f, x5, x6 b 0b /* * Final block: add padding and total bit count. * Skip if the input size was not a round multiple of the block size, * the padding is handled by the C code in that case. 
	 */
2:	cbz		x4, 3f
	ldr_l		w4, sha256_ce_offsetof_count, x4
	ldr		x4, [x0, x4]
	movi		v17.2d, #0
	mov		x8, #0x80000000
	movi		v18.2d, #0
	ror		x7, x4, #29		// ror(lsl(x4, 3), 32)
	fmov		d16, x8
	mov		x4, #0
	mov		v19.d[0], xzr
	mov		v19.d[1], x7
	b		1b

	/* store new state */
3:	st1		{dgav.4s, dgbv.4s}, [x0]
	mov		w0, w2
	ret
SYM_FUNC_END(sha2_ce_transform)
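/*
 * A hedged C sketch of the final block that the code above assembles in
 * v16-v19 when the input ended exactly on a block boundary: a single 0x80
 * byte, zero padding, and the message length in bits as a big-endian 64-bit
 * value. The helper below is illustrative only; the real driver keeps its
 * running count in struct sha256_ce_state.
 */
#include <stdint.h>
#include <string.h>

static void sha256_final_block(uint8_t block[64], uint64_t byte_count)
{
	uint64_t bits = byte_count << 3;

	memset(block, 0, 64);
	block[0] = 0x80;			/* leading padding bit */
	for (int i = 0; i < 8; i++)		/* big-endian bit count */
		block[56 + i] = (uint8_t)(bits >> (56 - 8 * i));
}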
aixcc-public/challenge-001-exemplar-source
19,011
arch/arm64/crypto/chacha-neon-core.S
/* * ChaCha/XChaCha NEON helper functions * * Copyright (C) 2016-2018 Linaro, Ltd. <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Originally based on: * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions * * Copyright (C) 2015 Martin Willi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/cache.h> .text .align 6 /* * chacha_permute - permute one block * * Permute one 64-byte block where the state matrix is stored in the four NEON * registers v0-v3. It performs matrix operations on four words in parallel, * but requires shuffling to rearrange the words after each round. * * The round count is given in w3. * * Clobbers: w3, x10, v4, v12 */ SYM_FUNC_START_LOCAL(chacha_permute) adr_l x10, ROT8 ld1 {v12.4s}, [x10] .Ldoubleround: // x0 += x1, x3 = rotl32(x3 ^ x0, 16) add v0.4s, v0.4s, v1.4s eor v3.16b, v3.16b, v0.16b rev32 v3.8h, v3.8h // x2 += x3, x1 = rotl32(x1 ^ x2, 12) add v2.4s, v2.4s, v3.4s eor v4.16b, v1.16b, v2.16b shl v1.4s, v4.4s, #12 sri v1.4s, v4.4s, #20 // x0 += x1, x3 = rotl32(x3 ^ x0, 8) add v0.4s, v0.4s, v1.4s eor v3.16b, v3.16b, v0.16b tbl v3.16b, {v3.16b}, v12.16b // x2 += x3, x1 = rotl32(x1 ^ x2, 7) add v2.4s, v2.4s, v3.4s eor v4.16b, v1.16b, v2.16b shl v1.4s, v4.4s, #7 sri v1.4s, v4.4s, #25 // x1 = shuffle32(x1, MASK(0, 3, 2, 1)) ext v1.16b, v1.16b, v1.16b, #4 // x2 = shuffle32(x2, MASK(1, 0, 3, 2)) ext v2.16b, v2.16b, v2.16b, #8 // x3 = shuffle32(x3, MASK(2, 1, 0, 3)) ext v3.16b, v3.16b, v3.16b, #12 // x0 += x1, x3 = rotl32(x3 ^ x0, 16) add v0.4s, v0.4s, v1.4s eor v3.16b, v3.16b, v0.16b rev32 v3.8h, v3.8h // x2 += x3, x1 = rotl32(x1 ^ x2, 12) add v2.4s, v2.4s, v3.4s eor v4.16b, v1.16b, v2.16b shl v1.4s, v4.4s, #12 sri v1.4s, v4.4s, #20 // x0 += x1, x3 = rotl32(x3 ^ x0, 8) add v0.4s, v0.4s, v1.4s eor v3.16b, v3.16b, v0.16b tbl v3.16b, {v3.16b}, v12.16b // x2 += x3, x1 = rotl32(x1 ^ x2, 7) add v2.4s, v2.4s, v3.4s eor v4.16b, v1.16b, v2.16b shl v1.4s, v4.4s, #7 sri v1.4s, v4.4s, #25 // x1 = shuffle32(x1, MASK(2, 1, 0, 3)) ext v1.16b, v1.16b, v1.16b, #12 // x2 = shuffle32(x2, MASK(1, 0, 3, 2)) ext v2.16b, v2.16b, v2.16b, #8 // x3 = shuffle32(x3, MASK(0, 3, 2, 1)) ext v3.16b, v3.16b, v3.16b, #4 subs w3, w3, #2 b.ne .Ldoubleround ret SYM_FUNC_END(chacha_permute) SYM_FUNC_START(chacha_block_xor_neon) // x0: Input state matrix, s // x1: 1 data block output, o // x2: 1 data block input, i // w3: nrounds stp x29, x30, [sp, #-16]! mov x29, sp // x0..3 = s0..3 ld1 {v0.4s-v3.4s}, [x0] ld1 {v8.4s-v11.4s}, [x0] bl chacha_permute ld1 {v4.16b-v7.16b}, [x2] // o0 = i0 ^ (x0 + s0) add v0.4s, v0.4s, v8.4s eor v0.16b, v0.16b, v4.16b // o1 = i1 ^ (x1 + s1) add v1.4s, v1.4s, v9.4s eor v1.16b, v1.16b, v5.16b // o2 = i2 ^ (x2 + s2) add v2.4s, v2.4s, v10.4s eor v2.16b, v2.16b, v6.16b // o3 = i3 ^ (x3 + s3) add v3.4s, v3.4s, v11.4s eor v3.16b, v3.16b, v7.16b st1 {v0.16b-v3.16b}, [x1] ldp x29, x30, [sp], #16 ret SYM_FUNC_END(chacha_block_xor_neon) SYM_FUNC_START(hchacha_block_neon) // x0: Input state matrix, s // x1: output (8 32-bit words) // w2: nrounds stp x29, x30, [sp, #-16]! 
mov x29, sp ld1 {v0.4s-v3.4s}, [x0] mov w3, w2 bl chacha_permute st1 {v0.4s}, [x1], #16 st1 {v3.4s}, [x1] ldp x29, x30, [sp], #16 ret SYM_FUNC_END(hchacha_block_neon) a0 .req w12 a1 .req w13 a2 .req w14 a3 .req w15 a4 .req w16 a5 .req w17 a6 .req w19 a7 .req w20 a8 .req w21 a9 .req w22 a10 .req w23 a11 .req w24 a12 .req w25 a13 .req w26 a14 .req w27 a15 .req w28 .align 6 SYM_FUNC_START(chacha_4block_xor_neon) frame_push 10 // x0: Input state matrix, s // x1: 4 data blocks output, o // x2: 4 data blocks input, i // w3: nrounds // x4: byte count adr_l x10, .Lpermute and x5, x4, #63 add x10, x10, x5 // // This function encrypts four consecutive ChaCha blocks by loading // the state matrix in NEON registers four times. The algorithm performs // each operation on the corresponding word of each state matrix, hence // requires no word shuffling. For final XORing step we transpose the // matrix by interleaving 32- and then 64-bit words, which allows us to // do XOR in NEON registers. // // At the same time, a fifth block is encrypted in parallel using // scalar registers // adr_l x9, CTRINC // ... and ROT8 ld1 {v30.4s-v31.4s}, [x9] // x0..15[0-3] = s0..3[0..3] add x8, x0, #16 ld4r { v0.4s- v3.4s}, [x0] ld4r { v4.4s- v7.4s}, [x8], #16 ld4r { v8.4s-v11.4s}, [x8], #16 ld4r {v12.4s-v15.4s}, [x8] mov a0, v0.s[0] mov a1, v1.s[0] mov a2, v2.s[0] mov a3, v3.s[0] mov a4, v4.s[0] mov a5, v5.s[0] mov a6, v6.s[0] mov a7, v7.s[0] mov a8, v8.s[0] mov a9, v9.s[0] mov a10, v10.s[0] mov a11, v11.s[0] mov a12, v12.s[0] mov a13, v13.s[0] mov a14, v14.s[0] mov a15, v15.s[0] // x12 += counter values 1-4 add v12.4s, v12.4s, v30.4s .Ldoubleround4: // x0 += x4, x12 = rotl32(x12 ^ x0, 16) // x1 += x5, x13 = rotl32(x13 ^ x1, 16) // x2 += x6, x14 = rotl32(x14 ^ x2, 16) // x3 += x7, x15 = rotl32(x15 ^ x3, 16) add v0.4s, v0.4s, v4.4s add a0, a0, a4 add v1.4s, v1.4s, v5.4s add a1, a1, a5 add v2.4s, v2.4s, v6.4s add a2, a2, a6 add v3.4s, v3.4s, v7.4s add a3, a3, a7 eor v12.16b, v12.16b, v0.16b eor a12, a12, a0 eor v13.16b, v13.16b, v1.16b eor a13, a13, a1 eor v14.16b, v14.16b, v2.16b eor a14, a14, a2 eor v15.16b, v15.16b, v3.16b eor a15, a15, a3 rev32 v12.8h, v12.8h ror a12, a12, #16 rev32 v13.8h, v13.8h ror a13, a13, #16 rev32 v14.8h, v14.8h ror a14, a14, #16 rev32 v15.8h, v15.8h ror a15, a15, #16 // x8 += x12, x4 = rotl32(x4 ^ x8, 12) // x9 += x13, x5 = rotl32(x5 ^ x9, 12) // x10 += x14, x6 = rotl32(x6 ^ x10, 12) // x11 += x15, x7 = rotl32(x7 ^ x11, 12) add v8.4s, v8.4s, v12.4s add a8, a8, a12 add v9.4s, v9.4s, v13.4s add a9, a9, a13 add v10.4s, v10.4s, v14.4s add a10, a10, a14 add v11.4s, v11.4s, v15.4s add a11, a11, a15 eor v16.16b, v4.16b, v8.16b eor a4, a4, a8 eor v17.16b, v5.16b, v9.16b eor a5, a5, a9 eor v18.16b, v6.16b, v10.16b eor a6, a6, a10 eor v19.16b, v7.16b, v11.16b eor a7, a7, a11 shl v4.4s, v16.4s, #12 shl v5.4s, v17.4s, #12 shl v6.4s, v18.4s, #12 shl v7.4s, v19.4s, #12 sri v4.4s, v16.4s, #20 ror a4, a4, #20 sri v5.4s, v17.4s, #20 ror a5, a5, #20 sri v6.4s, v18.4s, #20 ror a6, a6, #20 sri v7.4s, v19.4s, #20 ror a7, a7, #20 // x0 += x4, x12 = rotl32(x12 ^ x0, 8) // x1 += x5, x13 = rotl32(x13 ^ x1, 8) // x2 += x6, x14 = rotl32(x14 ^ x2, 8) // x3 += x7, x15 = rotl32(x15 ^ x3, 8) add v0.4s, v0.4s, v4.4s add a0, a0, a4 add v1.4s, v1.4s, v5.4s add a1, a1, a5 add v2.4s, v2.4s, v6.4s add a2, a2, a6 add v3.4s, v3.4s, v7.4s add a3, a3, a7 eor v12.16b, v12.16b, v0.16b eor a12, a12, a0 eor v13.16b, v13.16b, v1.16b eor a13, a13, a1 eor v14.16b, v14.16b, v2.16b eor a14, a14, a2 eor v15.16b, v15.16b, v3.16b eor a15, a15, a3 
tbl v12.16b, {v12.16b}, v31.16b ror a12, a12, #24 tbl v13.16b, {v13.16b}, v31.16b ror a13, a13, #24 tbl v14.16b, {v14.16b}, v31.16b ror a14, a14, #24 tbl v15.16b, {v15.16b}, v31.16b ror a15, a15, #24 // x8 += x12, x4 = rotl32(x4 ^ x8, 7) // x9 += x13, x5 = rotl32(x5 ^ x9, 7) // x10 += x14, x6 = rotl32(x6 ^ x10, 7) // x11 += x15, x7 = rotl32(x7 ^ x11, 7) add v8.4s, v8.4s, v12.4s add a8, a8, a12 add v9.4s, v9.4s, v13.4s add a9, a9, a13 add v10.4s, v10.4s, v14.4s add a10, a10, a14 add v11.4s, v11.4s, v15.4s add a11, a11, a15 eor v16.16b, v4.16b, v8.16b eor a4, a4, a8 eor v17.16b, v5.16b, v9.16b eor a5, a5, a9 eor v18.16b, v6.16b, v10.16b eor a6, a6, a10 eor v19.16b, v7.16b, v11.16b eor a7, a7, a11 shl v4.4s, v16.4s, #7 shl v5.4s, v17.4s, #7 shl v6.4s, v18.4s, #7 shl v7.4s, v19.4s, #7 sri v4.4s, v16.4s, #25 ror a4, a4, #25 sri v5.4s, v17.4s, #25 ror a5, a5, #25 sri v6.4s, v18.4s, #25 ror a6, a6, #25 sri v7.4s, v19.4s, #25 ror a7, a7, #25 // x0 += x5, x15 = rotl32(x15 ^ x0, 16) // x1 += x6, x12 = rotl32(x12 ^ x1, 16) // x2 += x7, x13 = rotl32(x13 ^ x2, 16) // x3 += x4, x14 = rotl32(x14 ^ x3, 16) add v0.4s, v0.4s, v5.4s add a0, a0, a5 add v1.4s, v1.4s, v6.4s add a1, a1, a6 add v2.4s, v2.4s, v7.4s add a2, a2, a7 add v3.4s, v3.4s, v4.4s add a3, a3, a4 eor v15.16b, v15.16b, v0.16b eor a15, a15, a0 eor v12.16b, v12.16b, v1.16b eor a12, a12, a1 eor v13.16b, v13.16b, v2.16b eor a13, a13, a2 eor v14.16b, v14.16b, v3.16b eor a14, a14, a3 rev32 v15.8h, v15.8h ror a15, a15, #16 rev32 v12.8h, v12.8h ror a12, a12, #16 rev32 v13.8h, v13.8h ror a13, a13, #16 rev32 v14.8h, v14.8h ror a14, a14, #16 // x10 += x15, x5 = rotl32(x5 ^ x10, 12) // x11 += x12, x6 = rotl32(x6 ^ x11, 12) // x8 += x13, x7 = rotl32(x7 ^ x8, 12) // x9 += x14, x4 = rotl32(x4 ^ x9, 12) add v10.4s, v10.4s, v15.4s add a10, a10, a15 add v11.4s, v11.4s, v12.4s add a11, a11, a12 add v8.4s, v8.4s, v13.4s add a8, a8, a13 add v9.4s, v9.4s, v14.4s add a9, a9, a14 eor v16.16b, v5.16b, v10.16b eor a5, a5, a10 eor v17.16b, v6.16b, v11.16b eor a6, a6, a11 eor v18.16b, v7.16b, v8.16b eor a7, a7, a8 eor v19.16b, v4.16b, v9.16b eor a4, a4, a9 shl v5.4s, v16.4s, #12 shl v6.4s, v17.4s, #12 shl v7.4s, v18.4s, #12 shl v4.4s, v19.4s, #12 sri v5.4s, v16.4s, #20 ror a5, a5, #20 sri v6.4s, v17.4s, #20 ror a6, a6, #20 sri v7.4s, v18.4s, #20 ror a7, a7, #20 sri v4.4s, v19.4s, #20 ror a4, a4, #20 // x0 += x5, x15 = rotl32(x15 ^ x0, 8) // x1 += x6, x12 = rotl32(x12 ^ x1, 8) // x2 += x7, x13 = rotl32(x13 ^ x2, 8) // x3 += x4, x14 = rotl32(x14 ^ x3, 8) add v0.4s, v0.4s, v5.4s add a0, a0, a5 add v1.4s, v1.4s, v6.4s add a1, a1, a6 add v2.4s, v2.4s, v7.4s add a2, a2, a7 add v3.4s, v3.4s, v4.4s add a3, a3, a4 eor v15.16b, v15.16b, v0.16b eor a15, a15, a0 eor v12.16b, v12.16b, v1.16b eor a12, a12, a1 eor v13.16b, v13.16b, v2.16b eor a13, a13, a2 eor v14.16b, v14.16b, v3.16b eor a14, a14, a3 tbl v15.16b, {v15.16b}, v31.16b ror a15, a15, #24 tbl v12.16b, {v12.16b}, v31.16b ror a12, a12, #24 tbl v13.16b, {v13.16b}, v31.16b ror a13, a13, #24 tbl v14.16b, {v14.16b}, v31.16b ror a14, a14, #24 // x10 += x15, x5 = rotl32(x5 ^ x10, 7) // x11 += x12, x6 = rotl32(x6 ^ x11, 7) // x8 += x13, x7 = rotl32(x7 ^ x8, 7) // x9 += x14, x4 = rotl32(x4 ^ x9, 7) add v10.4s, v10.4s, v15.4s add a10, a10, a15 add v11.4s, v11.4s, v12.4s add a11, a11, a12 add v8.4s, v8.4s, v13.4s add a8, a8, a13 add v9.4s, v9.4s, v14.4s add a9, a9, a14 eor v16.16b, v5.16b, v10.16b eor a5, a5, a10 eor v17.16b, v6.16b, v11.16b eor a6, a6, a11 eor v18.16b, v7.16b, v8.16b eor a7, a7, a8 eor v19.16b, v4.16b, v9.16b eor a4, 
a4, a9 shl v5.4s, v16.4s, #7 shl v6.4s, v17.4s, #7 shl v7.4s, v18.4s, #7 shl v4.4s, v19.4s, #7 sri v5.4s, v16.4s, #25 ror a5, a5, #25 sri v6.4s, v17.4s, #25 ror a6, a6, #25 sri v7.4s, v18.4s, #25 ror a7, a7, #25 sri v4.4s, v19.4s, #25 ror a4, a4, #25 subs w3, w3, #2 b.ne .Ldoubleround4 ld4r {v16.4s-v19.4s}, [x0], #16 ld4r {v20.4s-v23.4s}, [x0], #16 // x12 += counter values 0-3 add v12.4s, v12.4s, v30.4s // x0[0-3] += s0[0] // x1[0-3] += s0[1] // x2[0-3] += s0[2] // x3[0-3] += s0[3] add v0.4s, v0.4s, v16.4s mov w6, v16.s[0] mov w7, v17.s[0] add v1.4s, v1.4s, v17.4s mov w8, v18.s[0] mov w9, v19.s[0] add v2.4s, v2.4s, v18.4s add a0, a0, w6 add a1, a1, w7 add v3.4s, v3.4s, v19.4s add a2, a2, w8 add a3, a3, w9 CPU_BE( rev a0, a0 ) CPU_BE( rev a1, a1 ) CPU_BE( rev a2, a2 ) CPU_BE( rev a3, a3 ) ld4r {v24.4s-v27.4s}, [x0], #16 ld4r {v28.4s-v31.4s}, [x0] // x4[0-3] += s1[0] // x5[0-3] += s1[1] // x6[0-3] += s1[2] // x7[0-3] += s1[3] add v4.4s, v4.4s, v20.4s mov w6, v20.s[0] mov w7, v21.s[0] add v5.4s, v5.4s, v21.4s mov w8, v22.s[0] mov w9, v23.s[0] add v6.4s, v6.4s, v22.4s add a4, a4, w6 add a5, a5, w7 add v7.4s, v7.4s, v23.4s add a6, a6, w8 add a7, a7, w9 CPU_BE( rev a4, a4 ) CPU_BE( rev a5, a5 ) CPU_BE( rev a6, a6 ) CPU_BE( rev a7, a7 ) // x8[0-3] += s2[0] // x9[0-3] += s2[1] // x10[0-3] += s2[2] // x11[0-3] += s2[3] add v8.4s, v8.4s, v24.4s mov w6, v24.s[0] mov w7, v25.s[0] add v9.4s, v9.4s, v25.4s mov w8, v26.s[0] mov w9, v27.s[0] add v10.4s, v10.4s, v26.4s add a8, a8, w6 add a9, a9, w7 add v11.4s, v11.4s, v27.4s add a10, a10, w8 add a11, a11, w9 CPU_BE( rev a8, a8 ) CPU_BE( rev a9, a9 ) CPU_BE( rev a10, a10 ) CPU_BE( rev a11, a11 ) // x12[0-3] += s3[0] // x13[0-3] += s3[1] // x14[0-3] += s3[2] // x15[0-3] += s3[3] add v12.4s, v12.4s, v28.4s mov w6, v28.s[0] mov w7, v29.s[0] add v13.4s, v13.4s, v29.4s mov w8, v30.s[0] mov w9, v31.s[0] add v14.4s, v14.4s, v30.4s add a12, a12, w6 add a13, a13, w7 add v15.4s, v15.4s, v31.4s add a14, a14, w8 add a15, a15, w9 CPU_BE( rev a12, a12 ) CPU_BE( rev a13, a13 ) CPU_BE( rev a14, a14 ) CPU_BE( rev a15, a15 ) // interleave 32-bit words in state n, n+1 ldp w6, w7, [x2], #64 zip1 v16.4s, v0.4s, v1.4s ldp w8, w9, [x2, #-56] eor a0, a0, w6 zip2 v17.4s, v0.4s, v1.4s eor a1, a1, w7 zip1 v18.4s, v2.4s, v3.4s eor a2, a2, w8 zip2 v19.4s, v2.4s, v3.4s eor a3, a3, w9 ldp w6, w7, [x2, #-48] zip1 v20.4s, v4.4s, v5.4s ldp w8, w9, [x2, #-40] eor a4, a4, w6 zip2 v21.4s, v4.4s, v5.4s eor a5, a5, w7 zip1 v22.4s, v6.4s, v7.4s eor a6, a6, w8 zip2 v23.4s, v6.4s, v7.4s eor a7, a7, w9 ldp w6, w7, [x2, #-32] zip1 v24.4s, v8.4s, v9.4s ldp w8, w9, [x2, #-24] eor a8, a8, w6 zip2 v25.4s, v8.4s, v9.4s eor a9, a9, w7 zip1 v26.4s, v10.4s, v11.4s eor a10, a10, w8 zip2 v27.4s, v10.4s, v11.4s eor a11, a11, w9 ldp w6, w7, [x2, #-16] zip1 v28.4s, v12.4s, v13.4s ldp w8, w9, [x2, #-8] eor a12, a12, w6 zip2 v29.4s, v12.4s, v13.4s eor a13, a13, w7 zip1 v30.4s, v14.4s, v15.4s eor a14, a14, w8 zip2 v31.4s, v14.4s, v15.4s eor a15, a15, w9 add x3, x2, x4 sub x3, x3, #128 // start of last block subs x5, x4, #128 csel x2, x2, x3, ge // interleave 64-bit words in state n, n+2 zip1 v0.2d, v16.2d, v18.2d zip2 v4.2d, v16.2d, v18.2d stp a0, a1, [x1], #64 zip1 v8.2d, v17.2d, v19.2d zip2 v12.2d, v17.2d, v19.2d stp a2, a3, [x1, #-56] subs x6, x4, #192 ld1 {v16.16b-v19.16b}, [x2], #64 csel x2, x2, x3, ge zip1 v1.2d, v20.2d, v22.2d zip2 v5.2d, v20.2d, v22.2d stp a4, a5, [x1, #-48] zip1 v9.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d stp a6, a7, [x1, #-40] subs x7, x4, #256 ld1 {v20.16b-v23.16b}, [x2], #64 csel 
x2, x2, x3, ge zip1 v2.2d, v24.2d, v26.2d zip2 v6.2d, v24.2d, v26.2d stp a8, a9, [x1, #-32] zip1 v10.2d, v25.2d, v27.2d zip2 v14.2d, v25.2d, v27.2d stp a10, a11, [x1, #-24] subs x8, x4, #320 ld1 {v24.16b-v27.16b}, [x2], #64 csel x2, x2, x3, ge zip1 v3.2d, v28.2d, v30.2d zip2 v7.2d, v28.2d, v30.2d stp a12, a13, [x1, #-16] zip1 v11.2d, v29.2d, v31.2d zip2 v15.2d, v29.2d, v31.2d stp a14, a15, [x1, #-8] tbnz x5, #63, .Lt128 ld1 {v28.16b-v31.16b}, [x2] // xor with corresponding input, write to output eor v16.16b, v16.16b, v0.16b eor v17.16b, v17.16b, v1.16b eor v18.16b, v18.16b, v2.16b eor v19.16b, v19.16b, v3.16b tbnz x6, #63, .Lt192 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v6.16b eor v23.16b, v23.16b, v7.16b st1 {v16.16b-v19.16b}, [x1], #64 tbnz x7, #63, .Lt256 eor v24.16b, v24.16b, v8.16b eor v25.16b, v25.16b, v9.16b eor v26.16b, v26.16b, v10.16b eor v27.16b, v27.16b, v11.16b st1 {v20.16b-v23.16b}, [x1], #64 tbnz x8, #63, .Lt320 eor v28.16b, v28.16b, v12.16b eor v29.16b, v29.16b, v13.16b eor v30.16b, v30.16b, v14.16b eor v31.16b, v31.16b, v15.16b st1 {v24.16b-v27.16b}, [x1], #64 st1 {v28.16b-v31.16b}, [x1] .Lout: frame_pop ret // fewer than 192 bytes of in/output .Lt192: cbz x5, 1f // exactly 128 bytes? ld1 {v28.16b-v31.16b}, [x10] add x5, x5, x1 tbl v28.16b, {v4.16b-v7.16b}, v28.16b tbl v29.16b, {v4.16b-v7.16b}, v29.16b tbl v30.16b, {v4.16b-v7.16b}, v30.16b tbl v31.16b, {v4.16b-v7.16b}, v31.16b 0: eor v20.16b, v20.16b, v28.16b eor v21.16b, v21.16b, v29.16b eor v22.16b, v22.16b, v30.16b eor v23.16b, v23.16b, v31.16b st1 {v20.16b-v23.16b}, [x5] // overlapping stores 1: st1 {v16.16b-v19.16b}, [x1] b .Lout // fewer than 128 bytes of in/output .Lt128: ld1 {v28.16b-v31.16b}, [x10] add x5, x5, x1 sub x1, x1, #64 tbl v28.16b, {v0.16b-v3.16b}, v28.16b tbl v29.16b, {v0.16b-v3.16b}, v29.16b tbl v30.16b, {v0.16b-v3.16b}, v30.16b tbl v31.16b, {v0.16b-v3.16b}, v31.16b ld1 {v16.16b-v19.16b}, [x1] // reload first output block b 0b // fewer than 256 bytes of in/output .Lt256: cbz x6, 2f // exactly 192 bytes? ld1 {v4.16b-v7.16b}, [x10] add x6, x6, x1 tbl v0.16b, {v8.16b-v11.16b}, v4.16b tbl v1.16b, {v8.16b-v11.16b}, v5.16b tbl v2.16b, {v8.16b-v11.16b}, v6.16b tbl v3.16b, {v8.16b-v11.16b}, v7.16b eor v28.16b, v28.16b, v0.16b eor v29.16b, v29.16b, v1.16b eor v30.16b, v30.16b, v2.16b eor v31.16b, v31.16b, v3.16b st1 {v28.16b-v31.16b}, [x6] // overlapping stores 2: st1 {v20.16b-v23.16b}, [x1] b .Lout // fewer than 320 bytes of in/output .Lt320: cbz x7, 3f // exactly 256 bytes? ld1 {v4.16b-v7.16b}, [x10] add x7, x7, x1 tbl v0.16b, {v12.16b-v15.16b}, v4.16b tbl v1.16b, {v12.16b-v15.16b}, v5.16b tbl v2.16b, {v12.16b-v15.16b}, v6.16b tbl v3.16b, {v12.16b-v15.16b}, v7.16b eor v28.16b, v28.16b, v0.16b eor v29.16b, v29.16b, v1.16b eor v30.16b, v30.16b, v2.16b eor v31.16b, v31.16b, v3.16b st1 {v28.16b-v31.16b}, [x7] // overlapping stores 3: st1 {v24.16b-v27.16b}, [x1] b .Lout SYM_FUNC_END(chacha_4block_xor_neon) .section ".rodata", "a", %progbits .align L1_CACHE_SHIFT .Lpermute: .set .Li, 0 .rept 128 .byte (.Li - 64) .set .Li, .Li + 1 .endr CTRINC: .word 1, 2, 3, 4 ROT8: .word 0x02010003, 0x06050407, 0x0a09080b, 0x0e0d0c0f
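/*
 * Both chacha_permute and the interleaved scalar fifth block above are
 * built from the standard ChaCha quarter-round, shown here in C for
 * reference; the 16-bit and 8-bit rotations are the ones the NEON code
 * implements with rev32 and a tbl byte shuffle instead of shift pairs.
 */
#include <stdint.h>

static uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

static void chacha_quarterround(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 16);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 12);
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 8);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 7);
}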
aixcc-public/challenge-001-exemplar-source
2,173
arch/arm64/crypto/nh-neon-core.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * NH - ε-almost-universal hash function, ARM64 NEON accelerated version * * Copyright 2018 Google LLC * * Author: Eric Biggers <ebiggers@google.com> */ #include <linux/linkage.h> KEY .req x0 MESSAGE .req x1 MESSAGE_LEN .req x2 HASH .req x3 PASS0_SUMS .req v0 PASS1_SUMS .req v1 PASS2_SUMS .req v2 PASS3_SUMS .req v3 K0 .req v4 K1 .req v5 K2 .req v6 K3 .req v7 T0 .req v8 T1 .req v9 T2 .req v10 T3 .req v11 T4 .req v12 T5 .req v13 T6 .req v14 T7 .req v15 .macro _nh_stride k0, k1, k2, k3 // Load next message stride ld1 {T3.16b}, [MESSAGE], #16 // Load next key stride ld1 {\k3\().4s}, [KEY], #16 // Add message words to key words add T0.4s, T3.4s, \k0\().4s add T1.4s, T3.4s, \k1\().4s add T2.4s, T3.4s, \k2\().4s add T3.4s, T3.4s, \k3\().4s // Multiply 32x32 => 64 and accumulate mov T4.d[0], T0.d[1] mov T5.d[0], T1.d[1] mov T6.d[0], T2.d[1] mov T7.d[0], T3.d[1] umlal PASS0_SUMS.2d, T0.2s, T4.2s umlal PASS1_SUMS.2d, T1.2s, T5.2s umlal PASS2_SUMS.2d, T2.2s, T6.2s umlal PASS3_SUMS.2d, T3.2s, T7.2s .endm /* * void nh_neon(const u32 *key, const u8 *message, size_t message_len, * u8 hash[NH_HASH_BYTES]) * * It's guaranteed that message_len % 16 == 0. */ SYM_FUNC_START(nh_neon) ld1 {K0.4s,K1.4s}, [KEY], #32 movi PASS0_SUMS.2d, #0 movi PASS1_SUMS.2d, #0 ld1 {K2.4s}, [KEY], #16 movi PASS2_SUMS.2d, #0 movi PASS3_SUMS.2d, #0 subs MESSAGE_LEN, MESSAGE_LEN, #64 blt .Lloop4_done .Lloop4: _nh_stride K0, K1, K2, K3 _nh_stride K1, K2, K3, K0 _nh_stride K2, K3, K0, K1 _nh_stride K3, K0, K1, K2 subs MESSAGE_LEN, MESSAGE_LEN, #64 bge .Lloop4 .Lloop4_done: ands MESSAGE_LEN, MESSAGE_LEN, #63 beq .Ldone _nh_stride K0, K1, K2, K3 subs MESSAGE_LEN, MESSAGE_LEN, #16 beq .Ldone _nh_stride K1, K2, K3, K0 subs MESSAGE_LEN, MESSAGE_LEN, #16 beq .Ldone _nh_stride K2, K3, K0, K1 .Ldone: // Sum the accumulators for each pass, then store the sums to 'hash' addp T0.2d, PASS0_SUMS.2d, PASS1_SUMS.2d addp T1.2d, PASS2_SUMS.2d, PASS3_SUMS.2d st1 {T0.16b,T1.16b}, [HASH] ret SYM_FUNC_END(nh_neon)
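/*
 * A C model of the arithmetic in _nh_stride, assuming the standard NH
 * definition: each 32-bit message word is added to a key word mod 2^32 and
 * the sums are multiplied pairwise into 64-bit accumulators, one per pass,
 * with each pass reading the key four words further along. This sketches
 * the math only; it is not the kernel's NH driver interface, and the
 * one-accumulator-per-pass form stands in for the two NEON lanes.
 */
#include <stdint.h>

static void nh_stride(uint64_t sums[4], const uint32_t *key,
		      const uint32_t msg[4])
{
	for (int pass = 0; pass < 4; pass++) {
		uint32_t a = msg[0] + key[4 * pass + 0];
		uint32_t b = msg[1] + key[4 * pass + 1];
		uint32_t c = msg[2] + key[4 * pass + 2];
		uint32_t d = msg[3] + key[4 * pass + 3];

		/* 32x32 -> 64 multiplies, accumulated per pass */
		sums[pass] += (uint64_t)a * c + (uint64_t)b * d;
	}
}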
aixcc-public/challenge-001-exemplar-source
22,048
arch/arm64/crypto/sm3-neon-core.S
// SPDX-License-Identifier: GPL-2.0-or-later /* * sm3-neon-core.S - SM3 secure hash using NEON instructions * * Linux/arm64 port of the libgcrypt SM3 implementation for AArch64 * * Copyright (C) 2021 Jussi Kivilinna <jussi.kivilinna@iki.fi> * Copyright (c) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/assembler.h> /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 #define state_h5 20 #define state_h6 24 #define state_h7 28 /* Stack structure */ #define STACK_W_SIZE (32 * 2 * 3) #define STACK_W (0) #define STACK_SIZE (STACK_W + STACK_W_SIZE) /* Register macros */ #define RSTATE x0 #define RDATA x1 #define RNBLKS x2 #define RKPTR x28 #define RFRAME x29 #define ra w3 #define rb w4 #define rc w5 #define rd w6 #define re w7 #define rf w8 #define rg w9 #define rh w10 #define t0 w11 #define t1 w12 #define t2 w13 #define t3 w14 #define t4 w15 #define t5 w16 #define t6 w17 #define k_even w19 #define k_odd w20 #define addr0 x21 #define addr1 x22 #define s0 w23 #define s1 w24 #define s2 w25 #define s3 w26 #define W0 v0 #define W1 v1 #define W2 v2 #define W3 v3 #define W4 v4 #define W5 v5 #define XTMP0 v6 #define XTMP1 v7 #define XTMP2 v16 #define XTMP3 v17 #define XTMP4 v18 #define XTMP5 v19 #define XTMP6 v20 /* Helper macros. */ #define _(...) /*_*/ #define clear_vec(x) \ movi x.8h, #0; #define rolw(o, a, n) \ ror o, a, #(32 - n); /* Round function macros. */ #define GG1_1(x, y, z, o, t) \ eor o, x, y; #define GG1_2(x, y, z, o, t) \ eor o, o, z; #define GG1_3(x, y, z, o, t) #define FF1_1(x, y, z, o, t) GG1_1(x, y, z, o, t) #define FF1_2(x, y, z, o, t) #define FF1_3(x, y, z, o, t) GG1_2(x, y, z, o, t) #define GG2_1(x, y, z, o, t) \ bic o, z, x; #define GG2_2(x, y, z, o, t) \ and t, y, x; #define GG2_3(x, y, z, o, t) \ eor o, o, t; #define FF2_1(x, y, z, o, t) \ eor o, x, y; #define FF2_2(x, y, z, o, t) \ and t, x, y; \ and o, o, z; #define FF2_3(x, y, z, o, t) \ eor o, o, t; #define R(i, a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ K_LOAD(round); \ ldr t5, [sp, #(wtype##_W1_ADDR(round, widx))]; \ rolw(t0, a, 12); /* rol(a, 12) => t0 */ \ IOP(1, iop_param); \ FF##i##_1(a, b, c, t1, t2); \ ldr t6, [sp, #(wtype##_W1W2_ADDR(round, widx))]; \ add k, k, e; \ IOP(2, iop_param); \ GG##i##_1(e, f, g, t3, t4); \ FF##i##_2(a, b, c, t1, t2); \ IOP(3, iop_param); \ add k, k, t0; \ add h, h, t5; \ add d, d, t6; /* w1w2 + d => d */ \ IOP(4, iop_param); \ rolw(k, k, 7); /* rol (t0 + e + t), 7) => k */ \ GG##i##_2(e, f, g, t3, t4); \ add h, h, k; /* h + w1 + k => h */ \ IOP(5, iop_param); \ FF##i##_3(a, b, c, t1, t2); \ eor t0, t0, k; /* k ^ t0 => t0 */ \ GG##i##_3(e, f, g, t3, t4); \ add d, d, t1; /* FF(a,b,c) + d => d */ \ IOP(6, iop_param); \ add t3, t3, h; /* GG(e,f,g) + h => t3 */ \ rolw(b, b, 9); /* rol(b, 9) => b */ \ eor h, t3, t3, ror #(32-9); \ IOP(7, iop_param); \ add d, d, t0; /* t0 + d => d */ \ rolw(f, f, 19); /* rol(f, 19) => f */ \ IOP(8, iop_param); \ eor h, h, t3, ror #(32-17); /* P0(t3) => h */ #define R1(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ R(1, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param) #define R2(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ R(2, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param) #define KL(round) \ ldp k_even, k_odd, [RKPTR, #(4*(round))]; /* Input expansion 
macros. */ /* Byte-swapped input address. */ #define IW_W_ADDR(round, widx, offs) \ (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4)) /* Expanded input address. */ #define XW_W_ADDR(round, widx, offs) \ (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4)) /* Rounds 1-12, byte-swapped input block addresses. */ #define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 32) #define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 48) /* Rounds 1-12, expanded input block addresses. */ #define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0) #define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 16) /* Input block loading. * Interleaving within round function needed for in-order CPUs. */ #define LOAD_W_VEC_1_1() \ add addr0, sp, #IW_W1_ADDR(0, 0); #define LOAD_W_VEC_1_2() \ add addr1, sp, #IW_W1_ADDR(4, 0); #define LOAD_W_VEC_1_3() \ ld1 {W0.16b}, [RDATA], #16; #define LOAD_W_VEC_1_4() \ ld1 {W1.16b}, [RDATA], #16; #define LOAD_W_VEC_1_5() \ ld1 {W2.16b}, [RDATA], #16; #define LOAD_W_VEC_1_6() \ ld1 {W3.16b}, [RDATA], #16; #define LOAD_W_VEC_1_7() \ rev32 XTMP0.16b, W0.16b; #define LOAD_W_VEC_1_8() \ rev32 XTMP1.16b, W1.16b; #define LOAD_W_VEC_2_1() \ rev32 XTMP2.16b, W2.16b; #define LOAD_W_VEC_2_2() \ rev32 XTMP3.16b, W3.16b; #define LOAD_W_VEC_2_3() \ eor XTMP4.16b, XTMP1.16b, XTMP0.16b; #define LOAD_W_VEC_2_4() \ eor XTMP5.16b, XTMP2.16b, XTMP1.16b; #define LOAD_W_VEC_2_5() \ st1 {XTMP0.16b}, [addr0], #16; #define LOAD_W_VEC_2_6() \ st1 {XTMP4.16b}, [addr0]; \ add addr0, sp, #IW_W1_ADDR(8, 0); #define LOAD_W_VEC_2_7() \ eor XTMP6.16b, XTMP3.16b, XTMP2.16b; #define LOAD_W_VEC_2_8() \ ext W0.16b, XTMP0.16b, XTMP0.16b, #8; /* W0: xx, w0, xx, xx */ #define LOAD_W_VEC_3_1() \ mov W2.16b, XTMP1.16b; /* W2: xx, w6, w5, w4 */ #define LOAD_W_VEC_3_2() \ st1 {XTMP1.16b}, [addr1], #16; #define LOAD_W_VEC_3_3() \ st1 {XTMP5.16b}, [addr1]; \ ext W1.16b, XTMP0.16b, XTMP0.16b, #4; /* W1: xx, w3, w2, w1 */ #define LOAD_W_VEC_3_4() \ ext W3.16b, XTMP1.16b, XTMP2.16b, #12; /* W3: xx, w9, w8, w7 */ #define LOAD_W_VEC_3_5() \ ext W4.16b, XTMP2.16b, XTMP3.16b, #8; /* W4: xx, w12, w11, w10 */ #define LOAD_W_VEC_3_6() \ st1 {XTMP2.16b}, [addr0], #16; #define LOAD_W_VEC_3_7() \ st1 {XTMP6.16b}, [addr0]; #define LOAD_W_VEC_3_8() \ ext W5.16b, XTMP3.16b, XTMP3.16b, #4; /* W5: xx, w15, w14, w13 */ #define LOAD_W_VEC_1(iop_num, ...) \ LOAD_W_VEC_1_##iop_num() #define LOAD_W_VEC_2(iop_num, ...) \ LOAD_W_VEC_2_##iop_num() #define LOAD_W_VEC_3(iop_num, ...) \ LOAD_W_VEC_3_##iop_num() /* Message scheduling. Note: 3 words per vector register. * Interleaving within round function needed for in-order CPUs. 
*/ #define SCHED_W_1_1(round, w0, w1, w2, w3, w4, w5) \ /* Load (w[i - 16]) => XTMP0 */ \ /* Load (w[i - 13]) => XTMP5 */ \ ext XTMP0.16b, w0.16b, w0.16b, #12; /* XTMP0: w0, xx, xx, xx */ #define SCHED_W_1_2(round, w0, w1, w2, w3, w4, w5) \ ext XTMP5.16b, w1.16b, w1.16b, #12; #define SCHED_W_1_3(round, w0, w1, w2, w3, w4, w5) \ ext XTMP0.16b, XTMP0.16b, w1.16b, #12; /* XTMP0: xx, w2, w1, w0 */ #define SCHED_W_1_4(round, w0, w1, w2, w3, w4, w5) \ ext XTMP5.16b, XTMP5.16b, w2.16b, #12; #define SCHED_W_1_5(round, w0, w1, w2, w3, w4, w5) \ /* w[i - 9] == w3 */ \ /* W3 ^ XTMP0 => XTMP0 */ \ eor XTMP0.16b, XTMP0.16b, w3.16b; #define SCHED_W_1_6(round, w0, w1, w2, w3, w4, w5) \ /* w[i - 3] == w5 */ \ /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */ \ /* rol(XTMP5, 7) => XTMP1 */ \ add addr0, sp, #XW_W1_ADDR((round), 0); \ shl XTMP2.4s, w5.4s, #15; #define SCHED_W_1_7(round, w0, w1, w2, w3, w4, w5) \ shl XTMP1.4s, XTMP5.4s, #7; #define SCHED_W_1_8(round, w0, w1, w2, w3, w4, w5) \ sri XTMP2.4s, w5.4s, #(32-15); #define SCHED_W_2_1(round, w0, w1, w2, w3, w4, w5) \ sri XTMP1.4s, XTMP5.4s, #(32-7); #define SCHED_W_2_2(round, w0, w1, w2, w3, w4, w5) \ eor XTMP0.16b, XTMP0.16b, XTMP2.16b; #define SCHED_W_2_3(round, w0, w1, w2, w3, w4, w5) \ /* w[i - 6] == W4 */ \ /* W4 ^ XTMP1 => XTMP1 */ \ eor XTMP1.16b, XTMP1.16b, w4.16b; #define SCHED_W_2_4(round, w0, w1, w2, w3, w4, w5) \ /* P1(XTMP0) ^ XTMP1 => W0 */ \ shl XTMP3.4s, XTMP0.4s, #15; #define SCHED_W_2_5(round, w0, w1, w2, w3, w4, w5) \ shl XTMP4.4s, XTMP0.4s, #23; #define SCHED_W_2_6(round, w0, w1, w2, w3, w4, w5) \ eor w0.16b, XTMP1.16b, XTMP0.16b; #define SCHED_W_2_7(round, w0, w1, w2, w3, w4, w5) \ sri XTMP3.4s, XTMP0.4s, #(32-15); #define SCHED_W_2_8(round, w0, w1, w2, w3, w4, w5) \ sri XTMP4.4s, XTMP0.4s, #(32-23); #define SCHED_W_3_1(round, w0, w1, w2, w3, w4, w5) \ eor w0.16b, w0.16b, XTMP3.16b; #define SCHED_W_3_2(round, w0, w1, w2, w3, w4, w5) \ /* Load (w[i - 3]) => XTMP2 */ \ ext XTMP2.16b, w4.16b, w4.16b, #12; #define SCHED_W_3_3(round, w0, w1, w2, w3, w4, w5) \ eor w0.16b, w0.16b, XTMP4.16b; #define SCHED_W_3_4(round, w0, w1, w2, w3, w4, w5) \ ext XTMP2.16b, XTMP2.16b, w5.16b, #12; #define SCHED_W_3_5(round, w0, w1, w2, w3, w4, w5) \ /* W1 ^ W2 => XTMP3 */ \ eor XTMP3.16b, XTMP2.16b, w0.16b; #define SCHED_W_3_6(round, w0, w1, w2, w3, w4, w5) #define SCHED_W_3_7(round, w0, w1, w2, w3, w4, w5) \ st1 {XTMP2.16b-XTMP3.16b}, [addr0]; #define SCHED_W_3_8(round, w0, w1, w2, w3, w4, w5) #define SCHED_W_W0W1W2W3W4W5_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W0, W1, W2, W3, W4, W5) #define SCHED_W_W0W1W2W3W4W5_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W0, W1, W2, W3, W4, W5) #define SCHED_W_W0W1W2W3W4W5_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W0, W1, W2, W3, W4, W5) #define SCHED_W_W1W2W3W4W5W0_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W1, W2, W3, W4, W5, W0) #define SCHED_W_W1W2W3W4W5W0_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W1, W2, W3, W4, W5, W0) #define SCHED_W_W1W2W3W4W5W0_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W1, W2, W3, W4, W5, W0) #define SCHED_W_W2W3W4W5W0W1_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W2, W3, W4, W5, W0, W1) #define SCHED_W_W2W3W4W5W0W1_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W2, W3, W4, W5, W0, W1) #define SCHED_W_W2W3W4W5W0W1_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W2, W3, W4, W5, W0, W1) #define SCHED_W_W3W4W5W0W1W2_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W3, W4, W5, W0, W1, W2) #define SCHED_W_W3W4W5W0W1W2_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W3, W4, W5, W0, 
W1, W2) #define SCHED_W_W3W4W5W0W1W2_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W3, W4, W5, W0, W1, W2) #define SCHED_W_W4W5W0W1W2W3_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W4, W5, W0, W1, W2, W3) #define SCHED_W_W4W5W0W1W2W3_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W4, W5, W0, W1, W2, W3) #define SCHED_W_W4W5W0W1W2W3_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W4, W5, W0, W1, W2, W3) #define SCHED_W_W5W0W1W2W3W4_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W5, W0, W1, W2, W3, W4) #define SCHED_W_W5W0W1W2W3W4_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W5, W0, W1, W2, W3, W4) #define SCHED_W_W5W0W1W2W3W4_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W5, W0, W1, W2, W3, W4) /* * Transform blocks*64 bytes (blocks*16 32-bit words) at 'src'. * * void sm3_neon_transform(struct sm3_state *sst, u8 const *src, * int blocks) */ .text .align 3 SYM_TYPED_FUNC_START(sm3_neon_transform) ldp ra, rb, [RSTATE, #0] ldp rc, rd, [RSTATE, #8] ldp re, rf, [RSTATE, #16] ldp rg, rh, [RSTATE, #24] stp x28, x29, [sp, #-16]! stp x19, x20, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x23, x24, [sp, #-16]! stp x25, x26, [sp, #-16]! mov RFRAME, sp sub addr0, sp, #STACK_SIZE adr_l RKPTR, .LKtable and sp, addr0, #(~63) /* Preload first block. */ LOAD_W_VEC_1(1, 0) LOAD_W_VEC_1(2, 0) LOAD_W_VEC_1(3, 0) LOAD_W_VEC_1(4, 0) LOAD_W_VEC_1(5, 0) LOAD_W_VEC_1(6, 0) LOAD_W_VEC_1(7, 0) LOAD_W_VEC_1(8, 0) LOAD_W_VEC_2(1, 0) LOAD_W_VEC_2(2, 0) LOAD_W_VEC_2(3, 0) LOAD_W_VEC_2(4, 0) LOAD_W_VEC_2(5, 0) LOAD_W_VEC_2(6, 0) LOAD_W_VEC_2(7, 0) LOAD_W_VEC_2(8, 0) LOAD_W_VEC_3(1, 0) LOAD_W_VEC_3(2, 0) LOAD_W_VEC_3(3, 0) LOAD_W_VEC_3(4, 0) LOAD_W_VEC_3(5, 0) LOAD_W_VEC_3(6, 0) LOAD_W_VEC_3(7, 0) LOAD_W_VEC_3(8, 0) .balign 16 .Loop: /* Transform 0-3 */ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0) R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0) R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0) R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0) /* Transform 4-7 + Precalc 12-14 */ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0) R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 5, 1, IW, _, 0) R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12) R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 7, 3, IW, SCHED_W_W0W1W2W3W4W5_2, 12) /* Transform 8-11 + Precalc 12-17 */ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 8, 0, IW, SCHED_W_W0W1W2W3W4W5_3, 12) R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 9, 1, IW, SCHED_W_W1W2W3W4W5W0_1, 15) R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 10, 2, IW, SCHED_W_W1W2W3W4W5W0_2, 15) R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 11, 3, IW, SCHED_W_W1W2W3W4W5W0_3, 15) /* Transform 12-14 + Precalc 18-20 */ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 12, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 18) R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 13, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 18) R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 14, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 18) /* Transform 15-17 + Precalc 21-23 */ R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 15, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 21) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 16, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 21) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 17, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 21) /* Transform 18-20 + Precalc 24-26 */ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 18, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 24) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 19, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 24) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 20, 2, XW, 
SCHED_W_W4W5W0W1W2W3_3, 24) /* Transform 21-23 + Precalc 27-29 */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 21, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 27) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 22, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 27) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 23, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 27) /* Transform 24-26 + Precalc 30-32 */ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 24, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 30) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 25, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 30) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 26, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 30) /* Transform 27-29 + Precalc 33-35 */ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 27, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 33) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 28, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 33) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 29, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 33) /* Transform 30-32 + Precalc 36-38 */ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 30, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 36) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 31, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 36) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 32, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 36) /* Transform 33-35 + Precalc 39-41 */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 33, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 39) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 34, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 39) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 35, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 39) /* Transform 36-38 + Precalc 42-44 */ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 36, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 42) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 37, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 42) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 38, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 42) /* Transform 39-41 + Precalc 45-47 */ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 39, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 45) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 40, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 45) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 41, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 45) /* Transform 42-44 + Precalc 48-50 */ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 42, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 48) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 43, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 48) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 44, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 48) /* Transform 45-47 + Precalc 51-53 */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 45, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 51) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 46, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 51) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 47, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 51) /* Transform 48-50 + Precalc 54-56 */ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 48, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 54) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 49, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 54) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 50, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 54) /* Transform 51-53 + Precalc 57-59 */ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 51, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 57) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 52, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 57) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 53, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 57) /* Transform 54-56 + Precalc 60-62 */ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 54, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 60) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 55, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 60) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 56, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 60) /* 
Transform 57-59 + Precalc 63 */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 57, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 63) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 58, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 63) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 59, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 63) /* Transform 60 */ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 60, 0, XW, _, _) subs RNBLKS, RNBLKS, #1 b.eq .Lend /* Transform 61-63 + Preload next block */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, LOAD_W_VEC_1, _) ldp s0, s1, [RSTATE, #0] R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, LOAD_W_VEC_2, _) ldp s2, s3, [RSTATE, #8] R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, LOAD_W_VEC_3, _) /* Update the chaining variables. */ eor ra, ra, s0 eor rb, rb, s1 ldp s0, s1, [RSTATE, #16] eor rc, rc, s2 ldp k_even, k_odd, [RSTATE, #24] eor rd, rd, s3 eor re, re, s0 stp ra, rb, [RSTATE, #0] eor rf, rf, s1 stp rc, rd, [RSTATE, #8] eor rg, rg, k_even stp re, rf, [RSTATE, #16] eor rh, rh, k_odd stp rg, rh, [RSTATE, #24] b .Loop .Lend: /* Transform 61-63 */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, _, _) ldp s0, s1, [RSTATE, #0] R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, _, _) ldp s2, s3, [RSTATE, #8] R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, _, _) /* Update the chaining variables. */ eor ra, ra, s0 clear_vec(W0) eor rb, rb, s1 clear_vec(W1) ldp s0, s1, [RSTATE, #16] clear_vec(W2) eor rc, rc, s2 clear_vec(W3) ldp k_even, k_odd, [RSTATE, #24] clear_vec(W4) eor rd, rd, s3 clear_vec(W5) eor re, re, s0 clear_vec(XTMP0) stp ra, rb, [RSTATE, #0] clear_vec(XTMP1) eor rf, rf, s1 clear_vec(XTMP2) stp rc, rd, [RSTATE, #8] clear_vec(XTMP3) eor rg, rg, k_even clear_vec(XTMP4) stp re, rf, [RSTATE, #16] clear_vec(XTMP5) eor rh, rh, k_odd clear_vec(XTMP6) stp rg, rh, [RSTATE, #24] /* Clear message expansion area */ add addr0, sp, #STACK_W st1 {W0.16b-W3.16b}, [addr0], #64 st1 {W0.16b-W3.16b}, [addr0], #64 st1 {W0.16b-W3.16b}, [addr0] mov sp, RFRAME ldp x25, x26, [sp], #16 ldp x23, x24, [sp], #16 ldp x21, x22, [sp], #16 ldp x19, x20, [sp], #16 ldp x28, x29, [sp], #16 ret SYM_FUNC_END(sm3_neon_transform) .section ".rodata", "a" .align 4 .LKtable: .long 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb .long 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc .long 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce .long 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6 .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 .long 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53 .long 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d .long 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4 .long 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43 .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
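/*
 * Written out in C, these are the boolean functions and the P0 permutation
 * that the FF/GG macro triplets and the rol/eor sequence in R compute;
 * rounds 0-15 use the plain xor forms, later rounds the choose and majority
 * forms, per the SM3 standard. A reference sketch, not kernel code.
 */
#include <stdint.h>

static uint32_t rol32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

static uint32_t ff1(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; }
static uint32_t gg1(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; }

static uint32_t ff2(uint32_t x, uint32_t y, uint32_t z)
{
	return ((x ^ y) & z) ^ (x & y);		/* majority(x, y, z) */
}

static uint32_t gg2(uint32_t x, uint32_t y, uint32_t z)
{
	return (x & y) ^ (~x & z);		/* choose(x, y, z) */
}

static uint32_t p0(uint32_t x)
{
	return x ^ rol32(x, 9) ^ rol32(x, 17);
}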
aixcc-public/challenge-001-exemplar-source
1,715
arch/arm64/crypto/aes-ce-core.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

	.arch	armv8-a+crypto

SYM_FUNC_START(__aes_ce_encrypt)
	sub	w3, w3, #2
	ld1	{v0.16b}, [x2]
	ld1	{v1.4s}, [x0], #16
	cmp	w3, #10
	bmi	0f
	bne	3f
	mov	v3.16b, v1.16b
	b	2f
0:	mov	v2.16b, v1.16b
	ld1	{v3.4s}, [x0], #16
1:	aese	v0.16b, v2.16b
	aesmc	v0.16b, v0.16b
2:	ld1	{v1.4s}, [x0], #16
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
3:	ld1	{v2.4s}, [x0], #16
	subs	w3, w3, #3
	aese	v0.16b, v1.16b
	aesmc	v0.16b, v0.16b
	ld1	{v3.4s}, [x0], #16
	bpl	1b
	aese	v0.16b, v2.16b
	eor	v0.16b, v0.16b, v3.16b
	st1	{v0.16b}, [x1]
	ret
SYM_FUNC_END(__aes_ce_encrypt)

SYM_FUNC_START(__aes_ce_decrypt)
	sub	w3, w3, #2
	ld1	{v0.16b}, [x2]
	ld1	{v1.4s}, [x0], #16
	cmp	w3, #10
	bmi	0f
	bne	3f
	mov	v3.16b, v1.16b
	b	2f
0:	mov	v2.16b, v1.16b
	ld1	{v3.4s}, [x0], #16
1:	aesd	v0.16b, v2.16b
	aesimc	v0.16b, v0.16b
2:	ld1	{v1.4s}, [x0], #16
	aesd	v0.16b, v3.16b
	aesimc	v0.16b, v0.16b
3:	ld1	{v2.4s}, [x0], #16
	subs	w3, w3, #3
	aesd	v0.16b, v1.16b
	aesimc	v0.16b, v0.16b
	ld1	{v3.4s}, [x0], #16
	bpl	1b
	aesd	v0.16b, v2.16b
	eor	v0.16b, v0.16b, v3.16b
	st1	{v0.16b}, [x1]
	ret
SYM_FUNC_END(__aes_ce_decrypt)

/*
 * __aes_ce_sub() - use the aese instruction to perform the AES sbox
 *                  substitution on each byte in 'input'
 */
SYM_FUNC_START(__aes_ce_sub)
	dup	v1.4s, w0
	movi	v0.16b, #0
	aese	v0.16b, v1.16b
	umov	w0, v0.s[0]
	ret
SYM_FUNC_END(__aes_ce_sub)

SYM_FUNC_START(__aes_ce_invert)
	ld1	{v0.4s}, [x1]
	aesimc	v1.16b, v0.16b
	st1	{v1.4s}, [x0]
	ret
SYM_FUNC_END(__aes_ce_invert)
aixcc-public/challenge-001-exemplar-source
16,176
arch/arm64/crypto/crct10dif-ce-core.S
//
// Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
// Copyright (C) 2019 Google LLC <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//

// Derived from the x86 version:
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
// Copyright (c) 2013, Intel Corporation
//
// Authors:
//     Erdinc Ozturk <erdinc.ozturk@intel.com>
//     Vinodh Gopal <vinodh.gopal@intel.com>
//     James Guilford <james.guilford@intel.com>
//     Tim Chen <tim.c.chen@linux.intel.com>
//
// This software is available to you under a choice of one of two
// licenses.  You may choose to be licensed under the terms of the GNU
// General Public License (GPL) Version 2, available from the file
// COPYING in the main directory of this source tree, or the
// OpenIB.org BSD license below:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the
//   distribution.
//
// * Neither the name of the Intel Corporation nor the names of its
//   contributors may be used to endorse or promote products derived from
//   this software without specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Reference paper titled "Fast CRC Computation for Generic
// Polynomials Using PCLMULQDQ Instruction"
// URL: http://www.intel.com/content/dam/www/public/us/en/documents
//  /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.arch	armv8-a+crypto

	init_crc	.req	w0
	buf		.req	x1
	len		.req	x2
	fold_consts_ptr	.req	x3

	fold_consts	.req	v10

	ad		.req	v14

	k00_16		.req	v15
	k32_48		.req	v16

	t3		.req	v17
	t4		.req	v18
	t5		.req	v19
	t6		.req	v20
	t7		.req	v21
	t8		.req	v22
	t9		.req	v23

	perm1		.req	v24
	perm2		.req	v25
	perm3		.req	v26
	perm4		.req	v27

	bd1		.req	v28
	bd2		.req	v29
	bd3		.req	v30
	bd4		.req	v31

	.macro	__pmull_init_p64
	.endm

	.macro	__pmull_pre_p64, bd
	.endm

	.macro	__pmull_init_p8
	// k00_16 := 0x0000000000000000_000000000000ffff
	// k32_48 := 0x00000000ffffffff_0000ffffffffffff
	movi	k32_48.2d, #0xffffffff
	mov	k32_48.h[2], k32_48.h[0]
	ushr	k00_16.2d, k32_48.2d, #32

	// prepare the permutation vectors
	mov_q	x5, 0x080f0e0d0c0b0a09
	movi	perm4.8b, #8
	dup	perm1.2d, x5
	eor	perm1.16b, perm1.16b, perm4.16b
	ushr	perm2.2d, perm1.2d, #8
	ushr	perm3.2d, perm1.2d, #16
	ushr	perm4.2d, perm1.2d, #24
	sli	perm2.2d, perm1.2d, #56
	sli	perm3.2d, perm1.2d, #48
	sli	perm4.2d, perm1.2d, #40
	.endm

	.macro	__pmull_pre_p8, bd
	tbl	bd1.16b, {\bd\().16b}, perm1.16b
	tbl	bd2.16b, {\bd\().16b}, perm2.16b
	tbl	bd3.16b, {\bd\().16b}, perm3.16b
	tbl	bd4.16b, {\bd\().16b}, perm4.16b
	.endm

SYM_FUNC_START_LOCAL(__pmull_p8_core)
.L__pmull_p8_core:
	ext	t4.8b, ad.8b, ad.8b, #1			// A1
	ext	t5.8b, ad.8b, ad.8b, #2			// A2
	ext	t6.8b, ad.8b, ad.8b, #3			// A3

	pmull	t4.8h, t4.8b, fold_consts.8b		// F = A1*B
	pmull	t8.8h, ad.8b, bd1.8b			// E = A*B1
	pmull	t5.8h, t5.8b, fold_consts.8b		// H = A2*B
	pmull	t7.8h, ad.8b, bd2.8b			// G = A*B2
	pmull	t6.8h, t6.8b, fold_consts.8b		// J = A3*B
	pmull	t9.8h, ad.8b, bd3.8b			// I = A*B3
	pmull	t3.8h, ad.8b, bd4.8b			// K = A*B4
	b	0f

.L__pmull_p8_core2:
	tbl	t4.16b, {ad.16b}, perm1.16b		// A1
	tbl	t5.16b, {ad.16b}, perm2.16b		// A2
	tbl	t6.16b, {ad.16b}, perm3.16b		// A3

	pmull2	t4.8h, t4.16b, fold_consts.16b		// F = A1*B
	pmull2	t8.8h, ad.16b, bd1.16b			// E = A*B1
	pmull2	t5.8h, t5.16b, fold_consts.16b		// H = A2*B
	pmull2	t7.8h, ad.16b, bd2.16b			// G = A*B2
	pmull2	t6.8h, t6.16b, fold_consts.16b		// J = A3*B
	pmull2	t9.8h, ad.16b, bd3.16b			// I = A*B3
	pmull2	t3.8h, ad.16b, bd4.16b			// K = A*B4

0:	eor	t4.16b, t4.16b, t8.16b			// L = E + F
	eor	t5.16b, t5.16b, t7.16b			// M = G + H
	eor	t6.16b, t6.16b, t9.16b			// N = I + J

	uzp1	t8.2d, t4.2d, t5.2d
	uzp2	t4.2d, t4.2d, t5.2d
	uzp1	t7.2d, t6.2d, t3.2d
	uzp2	t6.2d, t6.2d, t3.2d

	// t4 = (L) (P0 + P1) << 8
	// t5 = (M) (P2 + P3) << 16
	eor	t8.16b, t8.16b, t4.16b
	and	t4.16b, t4.16b, k32_48.16b

	// t6 = (N) (P4 + P5) << 24
	// t7 = (K) (P6 + P7) << 32
	eor	t7.16b, t7.16b, t6.16b
	and	t6.16b, t6.16b, k00_16.16b

	eor	t8.16b, t8.16b, t4.16b
	eor	t7.16b, t7.16b, t6.16b

	zip2	t5.2d, t8.2d, t4.2d
	zip1	t4.2d, t8.2d, t4.2d
	zip2	t3.2d, t7.2d, t6.2d
	zip1	t6.2d, t7.2d, t6.2d

	ext	t4.16b, t4.16b, t4.16b, #15
	ext	t5.16b, t5.16b, t5.16b, #14
	ext	t6.16b, t6.16b, t6.16b, #13
	ext	t3.16b, t3.16b, t3.16b, #12

	eor	t4.16b, t4.16b, t5.16b
	eor	t6.16b, t6.16b, t3.16b
	ret
SYM_FUNC_END(__pmull_p8_core)

	.macro	__pmull_p8, rq, ad, bd, i
	.ifnc	\bd, fold_consts
	.err
	.endif
	mov	ad.16b, \ad\().16b
	.ifb	\i
	pmull	\rq\().8h, \ad\().8b, \bd\().8b		// D = A*B
	.else
	pmull2	\rq\().8h, \ad\().16b, \bd\().16b	// D = A*B
	.endif

	bl	.L__pmull_p8_core\i

	eor	\rq\().16b, \rq\().16b, t4.16b
	eor	\rq\().16b, \rq\().16b, t6.16b
	.endm

	// Fold reg1, reg2 into the next 32 data bytes, storing the result back
	// into reg1, reg2.
	.macro	fold_32_bytes, p, reg1, reg2
	ldp	q11, q12, [buf], #0x20

	__pmull_\p	v8, \reg1, fold_consts, 2
	__pmull_\p	\reg1, \reg1, fold_consts

CPU_LE(	rev64	v11.16b, v11.16b		)
CPU_LE(	rev64	v12.16b, v12.16b		)

	__pmull_\p	v9, \reg2, fold_consts, 2
	__pmull_\p	\reg2, \reg2, fold_consts

CPU_LE(	ext	v11.16b, v11.16b, v11.16b, #8	)
CPU_LE(	ext	v12.16b, v12.16b, v12.16b, #8	)

	eor	\reg1\().16b, \reg1\().16b, v8.16b
	eor	\reg2\().16b, \reg2\().16b, v9.16b
	eor	\reg1\().16b, \reg1\().16b, v11.16b
	eor	\reg2\().16b, \reg2\().16b, v12.16b
	.endm

	// Fold src_reg into dst_reg, optionally loading the next fold constants
	.macro	fold_16_bytes, p, src_reg, dst_reg, load_next_consts
	__pmull_\p	v8, \src_reg, fold_consts
	__pmull_\p	\src_reg, \src_reg, fold_consts, 2
	.ifnb	\load_next_consts
	ld1	{fold_consts.2d}, [fold_consts_ptr], #16
	__pmull_pre_\p	fold_consts
	.endif
	eor	\dst_reg\().16b, \dst_reg\().16b, v8.16b
	eor	\dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b
	.endm

	.macro	__pmull_p64, rd, rn, rm, n
	.ifb	\n
	pmull	\rd\().1q, \rn\().1d, \rm\().1d
	.else
	pmull2	\rd\().1q, \rn\().2d, \rm\().2d
	.endif
	.endm

	.macro	crc_t10dif_pmull, p
	__pmull_init_\p

	// For sizes less than 256 bytes, we can't fold 128 bytes at a time.
	cmp	len, #256
	b.lt	.Lless_than_256_bytes_\@

	adr_l	fold_consts_ptr, .Lfold_across_128_bytes_consts

	// Load the first 128 data bytes.  Byte swapping is necessary to make
	// the bit order match the polynomial coefficient order.
	ldp	q0, q1, [buf]
	ldp	q2, q3, [buf, #0x20]
	ldp	q4, q5, [buf, #0x40]
	ldp	q6, q7, [buf, #0x60]
	add	buf, buf, #0x80
CPU_LE(	rev64	v0.16b, v0.16b			)
CPU_LE(	rev64	v1.16b, v1.16b			)
CPU_LE(	rev64	v2.16b, v2.16b			)
CPU_LE(	rev64	v3.16b, v3.16b			)
CPU_LE(	rev64	v4.16b, v4.16b			)
CPU_LE(	rev64	v5.16b, v5.16b			)
CPU_LE(	rev64	v6.16b, v6.16b			)
CPU_LE(	rev64	v7.16b, v7.16b			)
CPU_LE(	ext	v0.16b, v0.16b, v0.16b, #8	)
CPU_LE(	ext	v1.16b, v1.16b, v1.16b, #8	)
CPU_LE(	ext	v2.16b, v2.16b, v2.16b, #8	)
CPU_LE(	ext	v3.16b, v3.16b, v3.16b, #8	)
CPU_LE(	ext	v4.16b, v4.16b, v4.16b, #8	)
CPU_LE(	ext	v5.16b, v5.16b, v5.16b, #8	)
CPU_LE(	ext	v6.16b, v6.16b, v6.16b, #8	)
CPU_LE(	ext	v7.16b, v7.16b, v7.16b, #8	)

	// XOR the first 16 data *bits* with the initial CRC value.
	movi	v8.16b, #0
	mov	v8.h[7], init_crc
	eor	v0.16b, v0.16b, v8.16b

	// Load the constants for folding across 128 bytes.
	ld1	{fold_consts.2d}, [fold_consts_ptr]
	__pmull_pre_\p	fold_consts

	// Subtract 128 for the 128 data bytes just consumed.  Subtract another
	// 128 to simplify the termination condition of the following loop.
	sub	len, len, #256

	// While >= 128 data bytes remain (not counting v0-v7), fold the 128
	// bytes v0-v7 into them, storing the result back into v0-v7.
.Lfold_128_bytes_loop_\@:
	fold_32_bytes	\p, v0, v1
	fold_32_bytes	\p, v2, v3
	fold_32_bytes	\p, v4, v5
	fold_32_bytes	\p, v6, v7

	subs	len, len, #128
	b.ge	.Lfold_128_bytes_loop_\@

	// Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.

	// Fold across 64 bytes.
	add	fold_consts_ptr, fold_consts_ptr, #16
	ld1	{fold_consts.2d}, [fold_consts_ptr], #16
	__pmull_pre_\p	fold_consts
	fold_16_bytes	\p, v0, v4
	fold_16_bytes	\p, v1, v5
	fold_16_bytes	\p, v2, v6
	fold_16_bytes	\p, v3, v7, 1
	// Fold across 32 bytes.
	fold_16_bytes	\p, v4, v6
	fold_16_bytes	\p, v5, v7, 1
	// Fold across 16 bytes.
	fold_16_bytes	\p, v6, v7

	// Add 128 to get the correct number of data bytes remaining in 0...127
	// (not counting v7), following the previous extra subtraction by 128.
	// Then subtract 16 to simplify the termination condition of the
	// following loop.
	adds	len, len, #(128-16)

	// While >= 16 data bytes remain (not counting v7), fold the 16 bytes v7
	// into them, storing the result back into v7.
	b.lt	.Lfold_16_bytes_loop_done_\@
.Lfold_16_bytes_loop_\@:
	__pmull_\p	v8, v7, fold_consts
	__pmull_\p	v7, v7, fold_consts, 2
	eor	v7.16b, v7.16b, v8.16b
	ldr	q0, [buf], #16
CPU_LE(	rev64	v0.16b, v0.16b			)
CPU_LE(	ext	v0.16b, v0.16b, v0.16b, #8	)
	eor	v7.16b, v7.16b, v0.16b
	subs	len, len, #16
	b.ge	.Lfold_16_bytes_loop_\@

.Lfold_16_bytes_loop_done_\@:
	// Add 16 to get the correct number of data bytes remaining in 0...15
	// (not counting v7), following the previous extra subtraction by 16.
	adds	len, len, #16
	b.eq	.Lreduce_final_16_bytes_\@

.Lhandle_partial_segment_\@:
	// Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
	// 16 bytes are in v7 and the rest are the remaining data in 'buf'.  To
	// do this without needing a fold constant for each possible 'len',
	// redivide the bytes into a first chunk of 'len' bytes and a second
	// chunk of 16 bytes, then fold the first chunk into the second.

	// v0 = last 16 original data bytes
	add	buf, buf, len
	ldr	q0, [buf, #-16]
CPU_LE(	rev64	v0.16b, v0.16b			)
CPU_LE(	ext	v0.16b, v0.16b, v0.16b, #8	)

	// v1 = high order part of second chunk: v7 left-shifted by 'len' bytes.
	adr_l	x4, .Lbyteshift_table + 16
	sub	x4, x4, len
	ld1	{v2.16b}, [x4]
	tbl	v1.16b, {v7.16b}, v2.16b

	// v3 = first chunk: v7 right-shifted by '16-len' bytes.
	movi	v3.16b, #0x80
	eor	v2.16b, v2.16b, v3.16b
	tbl	v3.16b, {v7.16b}, v2.16b

	// Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
	sshr	v2.16b, v2.16b, #7

	// v2 = second chunk: 'len' bytes from v0 (low-order bytes),
	// then '16-len' bytes from v1 (high-order bytes).
	bsl	v2.16b, v1.16b, v0.16b

	// Fold the first chunk into the second chunk, storing the result in v7.
	__pmull_\p	v0, v3, fold_consts
	__pmull_\p	v7, v3, fold_consts, 2
	eor	v7.16b, v7.16b, v0.16b
	eor	v7.16b, v7.16b, v2.16b

.Lreduce_final_16_bytes_\@:
	// Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.

	movi	v2.16b, #0		// init zero register

	// Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
	ld1	{fold_consts.2d}, [fold_consts_ptr], #16
	__pmull_pre_\p	fold_consts

	// Fold the high 64 bits into the low 64 bits, while also multiplying by
	// x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
	// whose low 48 bits are 0.
	ext	v0.16b, v2.16b, v7.16b, #8
	__pmull_\p	v7, v7, fold_consts, 2	// high bits * x^48 * (x^80 mod G(x))
	eor	v0.16b, v0.16b, v7.16b		// + low bits * x^64

	// Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
	// value congruent to x^64 * M(x) and whose low 48 bits are 0.
	ext	v1.16b, v0.16b, v2.16b, #12	// extract high 32 bits
	mov	v0.s[3], v2.s[0]		// zero high 32 bits
	__pmull_\p	v1, v1, fold_consts	// high 32 bits * x^48 * (x^48 mod G(x))
	eor	v0.16b, v0.16b, v1.16b		// + low bits

	// Load G(x) and floor(x^48 / G(x)).
	ld1	{fold_consts.2d}, [fold_consts_ptr]
	__pmull_pre_\p	fold_consts

	// Use Barrett reduction to compute the final CRC value.
	__pmull_\p	v1, v0, fold_consts, 2	// high 32 bits * floor(x^48 / G(x))
	ushr	v1.2d, v1.2d, #32		// /= x^32
	__pmull_\p	v1, v1, fold_consts	// *= G(x)
	ushr	v0.2d, v0.2d, #48
	eor	v0.16b, v0.16b, v1.16b		// + low 16 nonzero bits
	// Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.

	umov	w0, v0.h[0]
	.ifc	\p, p8
	ldp	x29, x30, [sp], #16
	.endif
	ret

.Lless_than_256_bytes_\@:
	// Checksumming a buffer of length 16...255 bytes

	adr_l	fold_consts_ptr, .Lfold_across_16_bytes_consts

	// Load the first 16 data bytes.
	ldr	q7, [buf], #0x10
CPU_LE(	rev64	v7.16b, v7.16b			)
CPU_LE(	ext	v7.16b, v7.16b, v7.16b, #8	)

	// XOR the first 16 data *bits* with the initial CRC value.
	movi	v0.16b, #0
	mov	v0.h[7], init_crc
	eor	v7.16b, v7.16b, v0.16b

	// Load the fold-across-16-bytes constants.
	ld1	{fold_consts.2d}, [fold_consts_ptr], #16
	__pmull_pre_\p	fold_consts

	cmp	len, #16
	b.eq	.Lreduce_final_16_bytes_\@	// len == 16
	subs	len, len, #32
	b.ge	.Lfold_16_bytes_loop_\@		// 32 <= len <= 255
	add	len, len, #16
	b	.Lhandle_partial_segment_\@	// 17 <= len <= 31
	.endm

//
// u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
SYM_FUNC_START(crc_t10dif_pmull_p8)
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp
	crc_t10dif_pmull	p8
SYM_FUNC_END(crc_t10dif_pmull_p8)

	.align	5
//
// u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
SYM_FUNC_START(crc_t10dif_pmull_p64)
	crc_t10dif_pmull	p64
SYM_FUNC_END(crc_t10dif_pmull_p64)

	.section	".rodata", "a"
	.align	4

// Fold constants precomputed from the polynomial 0x18bb7
// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
.Lfold_across_128_bytes_consts:
	.quad	0x0000000000006123	// x^(8*128)	mod G(x)
	.quad	0x0000000000002295	// x^(8*128+64)	mod G(x)
// .Lfold_across_64_bytes_consts:
	.quad	0x0000000000001069	// x^(4*128)	mod G(x)
	.quad	0x000000000000dd31	// x^(4*128+64)	mod G(x)
// .Lfold_across_32_bytes_consts:
	.quad	0x000000000000857d	// x^(2*128)	mod G(x)
	.quad	0x0000000000007acc	// x^(2*128+64)	mod G(x)
.Lfold_across_16_bytes_consts:
	.quad	0x000000000000a010	// x^(1*128)	mod G(x)
	.quad	0x0000000000001faa	// x^(1*128+64)	mod G(x)
// .Lfinal_fold_consts:
	.quad	0x1368000000000000	// x^48 * (x^48 mod G(x))
	.quad	0x2d56000000000000	// x^48 * (x^80 mod G(x))
// .Lbarrett_reduction_consts:
	.quad	0x0000000000018bb7	// G(x)
	.quad	0x00000001f65a57f8	// floor(x^48 / G(x))

// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
.Lbyteshift_table:
	.byte	 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
	.byte	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
	.byte	 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
	.byte	 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe , 0x0
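
The fold constants above are all derived from G(x) = 0x18bb7. As a cross-check for the PMULL paths, here is a hedged bit-at-a-time reference in C (the function name is illustrative; this is the classic T10 DIF CRC definition, not code from this file):

#include <stddef.h>
#include <stdint.h>

/* Bit-serial CRC-T10DIF: polynomial 0x8bb7 (the 0x18bb7 above with the
 * implicit x^16 term dropped), MSB-first, zero initial value. */
static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;	/* feed next byte into the top */
		for (int k = 0; k < 8; k++)	/* reduce one bit at a time */
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}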
aixcc-public/challenge-001-exemplar-source
3,341
arch/arm64/crypto/aes-cipher-core.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Scalar AES core transform
 *
 * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>

	.text

	rk	.req	x0
	out	.req	x1
	in	.req	x2
	rounds	.req	x3
	tt	.req	x2

	.macro	__pair1, sz, op, reg0, reg1, in0, in1e, in1d, shift
	.ifc	\op\shift, b0
	ubfiz	\reg0, \in0, #2, #8
	ubfiz	\reg1, \in1e, #2, #8
	.else
	ubfx	\reg0, \in0, #\shift, #8
	ubfx	\reg1, \in1e, #\shift, #8
	.endif

	/*
	 * AArch64 cannot do byte size indexed loads from a table containing
	 * 32-bit quantities, i.e., 'ldrb w12, [tt, w12, uxtw #2]' is not a
	 * valid instruction. So perform the shift explicitly first for the
	 * high bytes (the low byte is shifted implicitly by using ubfiz rather
	 * than ubfx above)
	 */
	.ifnc	\op, b
	ldr	\reg0, [tt, \reg0, uxtw #2]
	ldr	\reg1, [tt, \reg1, uxtw #2]
	.else
	.if	\shift > 0
	lsl	\reg0, \reg0, #2
	lsl	\reg1, \reg1, #2
	.endif
	ldrb	\reg0, [tt, \reg0, uxtw]
	ldrb	\reg1, [tt, \reg1, uxtw]
	.endif
	.endm

	.macro	__pair0, sz, op, reg0, reg1, in0, in1e, in1d, shift
	ubfx	\reg0, \in0, #\shift, #8
	ubfx	\reg1, \in1d, #\shift, #8
	ldr\op	\reg0, [tt, \reg0, uxtw #\sz]
	ldr\op	\reg1, [tt, \reg1, uxtw #\sz]
	.endm

	.macro	__hround, out0, out1, in0, in1, in2, in3, t0, t1, enc, sz, op
	ldp	\out0, \out1, [rk], #8

	__pair\enc	\sz, \op, w12, w13, \in0, \in1, \in3, 0
	__pair\enc	\sz, \op, w14, w15, \in1, \in2, \in0, 8
	__pair\enc	\sz, \op, w16, w17, \in2, \in3, \in1, 16
	__pair\enc	\sz, \op, \t0, \t1, \in3, \in0, \in2, 24

	eor	\out0, \out0, w12
	eor	\out1, \out1, w13
	eor	\out0, \out0, w14, ror #24
	eor	\out1, \out1, w15, ror #24
	eor	\out0, \out0, w16, ror #16
	eor	\out1, \out1, w17, ror #16
	eor	\out0, \out0, \t0, ror #8
	eor	\out1, \out1, \t1, ror #8
	.endm

	.macro	fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op
	__hround	\out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op
	__hround	\out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op
	.endm

	.macro	iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op
	__hround	\out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op
	__hround	\out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op
	.endm

	.macro	do_crypt, round, ttab, ltab, bsz
	ldp	w4, w5, [in]
	ldp	w6, w7, [in, #8]
	ldp	w8, w9, [rk], #16
	ldp	w10, w11, [rk, #-8]

CPU_BE(	rev	w4, w4		)
CPU_BE(	rev	w5, w5		)
CPU_BE(	rev	w6, w6		)
CPU_BE(	rev	w7, w7		)

	eor	w4, w4, w8
	eor	w5, w5, w9
	eor	w6, w6, w10
	eor	w7, w7, w11

	adr_l	tt, \ttab

	tbnz	rounds, #1, 1f

0:	\round	w8, w9, w10, w11, w4, w5, w6, w7
	\round	w4, w5, w6, w7, w8, w9, w10, w11

1:	subs	rounds, rounds, #4
	\round	w8, w9, w10, w11, w4, w5, w6, w7
	b.ls	3f
2:	\round	w4, w5, w6, w7, w8, w9, w10, w11
	b	0b
3:	adr_l	tt, \ltab
	\round	w4, w5, w6, w7, w8, w9, w10, w11, \bsz, b

CPU_BE(	rev	w4, w4		)
CPU_BE(	rev	w5, w5		)
CPU_BE(	rev	w6, w6		)
CPU_BE(	rev	w7, w7		)

	stp	w4, w5, [out]
	stp	w6, w7, [out, #8]
	ret
	.endm

SYM_FUNC_START(__aes_arm64_encrypt)
	do_crypt	fround, crypto_ft_tab, crypto_ft_tab + 1, 2
SYM_FUNC_END(__aes_arm64_encrypt)

	.align	5
SYM_FUNC_START(__aes_arm64_decrypt)
	do_crypt	iround, crypto_it_tab, crypto_aes_inv_sbox, 0
SYM_FUNC_END(__aes_arm64_decrypt)
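
The __hround macro above is the classic T-table formulation of an AES round: four 32-bit table lookups per output word, three of them rotated, XORed with the round-key word. A hedged C sketch of one such column, assuming a 256-entry 32-bit forward table laid out the way the rotations above expect (the table pointer is a stand-in for crypto_ft_tab, not a definition from this file):

#include <stdint.h>

static inline uint32_t ror32(uint32_t v, unsigned n)
{
	return (v >> n) | (v << (32 - n));
}

/* One forward-round output word, mirroring __hround's lookup/rotate/XOR. */
static uint32_t aes_fround_col(const uint32_t ft_tab[256], uint32_t rk,
			       uint32_t c0, uint32_t c1, uint32_t c2,
			       uint32_t c3)
{
	return rk ^ ft_tab[c0 & 0xff]
		  ^ ror32(ft_tab[(c1 >> 8) & 0xff], 24)
		  ^ ror32(ft_tab[(c2 >> 16) & 0xff], 16)
		  ^ ror32(ft_tab[(c3 >> 24) & 0xff], 8);
}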
aixcc-public/challenge-001-exemplar-source
3,311
arch/arm64/crypto/sm3-ce-core.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * sm3-ce-core.S - SM3 secure hash using ARMv8.2 Crypto Extensions
 *
 * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>

	.irp	b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
	.set	.Lv\b\().4s, \b
	.endr

	.macro	sm3partw1, rd, rn, rm
	.inst	0xce60c000 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
	.endm

	.macro	sm3partw2, rd, rn, rm
	.inst	0xce60c400 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
	.endm

	.macro	sm3ss1, rd, rn, rm, ra
	.inst	0xce400000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
	.endm

	.macro	sm3tt1a, rd, rn, rm, imm2
	.inst	0xce408000 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
	.endm

	.macro	sm3tt1b, rd, rn, rm, imm2
	.inst	0xce408400 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
	.endm

	.macro	sm3tt2a, rd, rn, rm, imm2
	.inst	0xce408800 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
	.endm

	.macro	sm3tt2b, rd, rn, rm, imm2
	.inst	0xce408c00 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
	.endm

	.macro	round, ab, s0, t0, t1, i
	sm3ss1	v5.4s, v8.4s, \t0\().4s, v9.4s
	shl	\t1\().4s, \t0\().4s, #1
	sri	\t1\().4s, \t0\().4s, #31
	sm3tt1\ab	v8.4s, v5.4s, v10.4s, \i
	sm3tt2\ab	v9.4s, v5.4s, \s0\().4s, \i
	.endm

	.macro	qround, ab, s0, s1, s2, s3, s4
	.ifnb	\s4
	ext	\s4\().16b, \s1\().16b, \s2\().16b, #12
	ext	v6.16b, \s0\().16b, \s1\().16b, #12
	ext	v7.16b, \s2\().16b, \s3\().16b, #8
	sm3partw1	\s4\().4s, \s0\().4s, \s3\().4s
	.endif

	eor	v10.16b, \s0\().16b, \s1\().16b

	round	\ab, \s0, v11, v12, 0
	round	\ab, \s0, v12, v11, 1
	round	\ab, \s0, v11, v12, 2
	round	\ab, \s0, v12, v11, 3

	.ifnb	\s4
	sm3partw2	\s4\().4s, v7.4s, v6.4s
	.endif
	.endm

	/*
	 * void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
	 *                       int blocks)
	 */
	.text
SYM_TYPED_FUNC_START(sm3_ce_transform)
	/* load state */
	ld1	{v8.4s-v9.4s}, [x0]
	rev64	v8.4s, v8.4s
	rev64	v9.4s, v9.4s
	ext	v8.16b, v8.16b, v8.16b, #8
	ext	v9.16b, v9.16b, v9.16b, #8

	adr_l	x8, .Lt
	ldp	s13, s14, [x8]

	/* load input */
0:	ld1	{v0.16b-v3.16b}, [x1], #64
	sub	w2, w2, #1

	mov	v15.16b, v8.16b
	mov	v16.16b, v9.16b

CPU_LE(	rev32	v0.16b, v0.16b		)
CPU_LE(	rev32	v1.16b, v1.16b		)
CPU_LE(	rev32	v2.16b, v2.16b		)
CPU_LE(	rev32	v3.16b, v3.16b		)

	ext	v11.16b, v13.16b, v13.16b, #4

	qround	a, v0, v1, v2, v3, v4
	qround	a, v1, v2, v3, v4, v0
	qround	a, v2, v3, v4, v0, v1
	qround	a, v3, v4, v0, v1, v2

	ext	v11.16b, v14.16b, v14.16b, #4

	qround	b, v4, v0, v1, v2, v3
	qround	b, v0, v1, v2, v3, v4
	qround	b, v1, v2, v3, v4, v0
	qround	b, v2, v3, v4, v0, v1
	qround	b, v3, v4, v0, v1, v2
	qround	b, v4, v0, v1, v2, v3
	qround	b, v0, v1, v2, v3, v4
	qround	b, v1, v2, v3, v4, v0
	qround	b, v2, v3, v4, v0, v1
	qround	b, v3, v4
	qround	b, v4, v0
	qround	b, v0, v1

	eor	v8.16b, v8.16b, v15.16b
	eor	v9.16b, v9.16b, v16.16b

	/* handled all input blocks? */
	cbnz	w2, 0b

	/* save state */
	rev64	v8.4s, v8.4s
	rev64	v9.4s, v9.4s
	ext	v8.16b, v8.16b, v8.16b, #8
	ext	v9.16b, v9.16b, v9.16b, #8
	st1	{v8.4s-v9.4s}, [x0]
	ret
SYM_FUNC_END(sm3_ce_transform)

	.section	".rodata", "a"
	.align	3
.Lt:	.word	0x79cc4519, 0x9d8a7a87
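
The sm3partw1/sm3partw2 instructions used above vectorize SM3's message expansion. A scalar C sketch of the same schedule, written from the published SM3 specification (the names are illustrative; this is not code from this file):

#include <stdint.h>

static inline uint32_t rol32(uint32_t v, unsigned n)
{
	return (v << n) | (v >> (32 - n));
}

static inline uint32_t p1(uint32_t x)	/* SM3 permutation P1 */
{
	return x ^ rol32(x, 15) ^ rol32(x, 23);
}

/* Expand the 16 message words into the 68-word schedule. */
static void sm3_expand(uint32_t w[68])
{
	for (int j = 16; j < 68; j++)
		w[j] = p1(w[j - 16] ^ w[j - 9] ^ rol32(w[j - 3], 15)) ^
		       rol32(w[j - 13], 7) ^ w[j - 6];
}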
aixcc-public/challenge-001-exemplar-source
1,394
arch/arm64/lib/copy_to_user.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/linkage.h>

#include <asm/asm-uaccess.h>
#include <asm/assembler.h>
#include <asm/cache.h>

/*
 * Copy to user space from a kernel buffer (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - to
 *	x1 - from
 *	x2 - n
 * Returns:
 *	x0 - bytes not copied
 */
	.macro ldrb1 reg, ptr, val
	ldrb	\reg, [\ptr], \val
	.endm

	.macro strb1 reg, ptr, val
	user_ldst 9998f, sttrb, \reg, \ptr, \val
	.endm

	.macro ldrh1 reg, ptr, val
	ldrh	\reg, [\ptr], \val
	.endm

	.macro strh1 reg, ptr, val
	user_ldst 9997f, sttrh, \reg, \ptr, \val
	.endm

	.macro ldr1 reg, ptr, val
	ldr	\reg, [\ptr], \val
	.endm

	.macro str1 reg, ptr, val
	user_ldst 9997f, sttr, \reg, \ptr, \val
	.endm

	.macro ldp1 reg1, reg2, ptr, val
	ldp	\reg1, \reg2, [\ptr], \val
	.endm

	.macro stp1 reg1, reg2, ptr, val
	user_stp 9997f, \reg1, \reg2, \ptr, \val
	.endm

end	.req	x5
srcin	.req	x15
SYM_FUNC_START(__arch_copy_to_user)
	add	end, x0, x2
	mov	srcin, x1
#include "copy_template.S"
	mov	x0, #0
	ret

	// Exception fixups
9997:	cmp	dst, dstin
	b.ne	9998f
	// Before being absolutely sure we couldn't copy anything, try harder
	ldrb	tmp1w, [srcin]
USER(9998f, sttrb tmp1w, [dst])
	add	dst, dst, #1
9998:	sub	x0, end, dst			// bytes not copied
	ret
SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)
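
The "bytes not copied" return contract documented above is what callers of the generic copy_to_user() wrapper rely on. A minimal kernel-C usage sketch (the wrapper function name is illustrative):

#include <linux/uaccess.h>

/* copy_to_user() returns the number of bytes NOT copied, the same
 * contract as __arch_copy_to_user above; nonzero usually becomes -EFAULT. */
static long copy_out_example(void __user *ubuf, const void *kbuf, size_t n)
{
	if (copy_to_user(ubuf, kbuf, n))
		return -EFAULT;
	return 0;
}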
aixcc-public/challenge-001-exemplar-source
1,206
arch/arm64/lib/clear_user.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Arm Ltd.
 */

#include <linux/linkage.h>
#include <asm/asm-uaccess.h>

	.text

/* Prototype: int __arch_clear_user(void *addr, size_t sz)
 * Purpose  : clear some user memory
 * Params   : addr - user memory address to clear
 *          : sz   - number of bytes to clear
 * Returns  : number of bytes NOT cleared
 *
 * Alignment fixed up by hardware.
 */

	.p2align 4
	// Alignment is for the loop, but since the prologue (including BTI)
	// is also 16 bytes we can keep any padding outside the function
SYM_FUNC_START(__arch_clear_user)
	add	x2, x0, x1
	subs	x1, x1, #8
	b.mi	2f
1:
USER(9f, sttr	xzr, [x0])
	add	x0, x0, #8
	subs	x1, x1, #8
	b.hi	1b
USER(9f, sttr	xzr, [x2, #-8])
	mov	x0, #0
	ret

2:	tbz	x1, #2, 3f
USER(9f, sttr	wzr, [x0])
USER(8f, sttr	wzr, [x2, #-4])
	mov	x0, #0
	ret

3:	tbz	x1, #1, 4f
USER(9f, sttrh	wzr, [x0])
4:	tbz	x1, #0, 5f
USER(7f, sttrb	wzr, [x2, #-1])
5:	mov	x0, #0
	ret

	// Exception fixups
7:	sub	x0, x2, #5	// Adjust for faulting on the final byte...
8:	add	x0, x0, #4	// ...or the second word of the 4-7 byte case
9:	sub	x0, x2, x0
	ret
SYM_FUNC_END(__arch_clear_user)
EXPORT_SYMBOL(__arch_clear_user)
aixcc-public/challenge-001-exemplar-source
4,034
arch/arm64/lib/copy_template.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by
 * Linaro and can be found at:
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 */

/*
 * Copy a buffer from src to dest (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - dest
 *	x1 - src
 *	x2 - n
 * Returns:
 *	x0 - dest
 */
dstin	.req	x0
src	.req	x1
count	.req	x2
tmp1	.req	x3
tmp1w	.req	w3
tmp2	.req	x4
tmp2w	.req	w4
dst	.req	x6

A_l	.req	x7
A_h	.req	x8
B_l	.req	x9
B_h	.req	x10
C_l	.req	x11
C_h	.req	x12
D_l	.req	x13
D_h	.req	x14

	mov	dst, dstin
	cmp	count, #16
	/* When the length is less than 16, the accesses may be unaligned. */
	b.lo	.Ltiny15

	neg	tmp2, src
	ands	tmp2, tmp2, #15		/* Bytes to reach alignment. */
	b.eq	.LSrcAligned
	sub	count, count, tmp2
	/*
	 * Copy the leading memory data from src to dst in increasing address
	 * order. This way, the risk of overwriting the source memory data is
	 * eliminated when the distance between src and dst is less than 16.
	 * The memory accesses here are aligned.
	 */
	tbz	tmp2, #0, 1f
	ldrb1	tmp1w, src, #1
	strb1	tmp1w, dst, #1
1:
	tbz	tmp2, #1, 2f
	ldrh1	tmp1w, src, #2
	strh1	tmp1w, dst, #2
2:
	tbz	tmp2, #2, 3f
	ldr1	tmp1w, src, #4
	str1	tmp1w, dst, #4
3:
	tbz	tmp2, #3, .LSrcAligned
	ldr1	tmp1, src, #8
	str1	tmp1, dst, #8

.LSrcAligned:
	cmp	count, #64
	b.ge	.Lcpy_over64
	/*
	 * Deal with small copies quickly by dropping straight into the
	 * exit block.
	 */
.Ltail63:
	/*
	 * Copy up to 48 bytes of data. At this point we only need the
	 * bottom 6 bits of count to be accurate.
	 */
	ands	tmp1, count, #0x30
	b.eq	.Ltiny15
	cmp	tmp1w, #0x20
	b.eq	1f
	b.lt	2f
	ldp1	A_l, A_h, src, #16
	stp1	A_l, A_h, dst, #16
1:
	ldp1	A_l, A_h, src, #16
	stp1	A_l, A_h, dst, #16
2:
	ldp1	A_l, A_h, src, #16
	stp1	A_l, A_h, dst, #16
.Ltiny15:
	/*
	 * Prefer to break one ldp/stp into several load/store accesses in
	 * increasing address order, rather than to load/store 16 bytes from
	 * (src-16) to (dst-16) and move src backwards to an aligned address,
	 * as is done in the original cortex memcpy. If the original memcpy
	 * process were kept here, memmove would need to satisfy the
	 * precondition that the src address is at least 16 bytes bigger than
	 * the dst address, otherwise some source data would be overwritten
	 * when memmove calls memcpy directly. To keep memmove simple and to
	 * decouple memcpy from memmove, the original process was withdrawn.
	 */
	tbz	count, #3, 1f
	ldr1	tmp1, src, #8
	str1	tmp1, dst, #8
1:
	tbz	count, #2, 2f
	ldr1	tmp1w, src, #4
	str1	tmp1w, dst, #4
2:
	tbz	count, #1, 3f
	ldrh1	tmp1w, src, #2
	strh1	tmp1w, dst, #2
3:
	tbz	count, #0, .Lexitfunc
	ldrb1	tmp1w, src, #1
	strb1	tmp1w, dst, #1

	b	.Lexitfunc

.Lcpy_over64:
	subs	count, count, #128
	b.ge	.Lcpy_body_large
	/*
	 * Less than 128 bytes to copy, so handle 64 here and then jump
	 * to the tail.
	 */
	ldp1	A_l, A_h, src, #16
	stp1	A_l, A_h, dst, #16
	ldp1	B_l, B_h, src, #16
	ldp1	C_l, C_h, src, #16
	stp1	B_l, B_h, dst, #16
	stp1	C_l, C_h, dst, #16
	ldp1	D_l, D_h, src, #16
	stp1	D_l, D_h, dst, #16

	tst	count, #0x3f
	b.ne	.Ltail63
	b	.Lexitfunc

	/*
	 * Critical loop.  Start at a new cache line boundary.  Assuming
	 * 64 bytes per line this ensures the entire loop is in one line.
	 */
	.p2align	L1_CACHE_SHIFT
.Lcpy_body_large:
	/* pre-get 64 bytes data. */
	ldp1	A_l, A_h, src, #16
	ldp1	B_l, B_h, src, #16
	ldp1	C_l, C_h, src, #16
	ldp1	D_l, D_h, src, #16
1:
	/*
	 * interlace the load of the next 64-byte data block with the store
	 * of the last loaded 64 bytes of data.
	 */
	stp1	A_l, A_h, dst, #16
	ldp1	A_l, A_h, src, #16
	stp1	B_l, B_h, dst, #16
	ldp1	B_l, B_h, src, #16
	stp1	C_l, C_h, dst, #16
	ldp1	C_l, C_h, src, #16
	stp1	D_l, D_h, dst, #16
	ldp1	D_l, D_h, src, #16
	subs	count, count, #64
	b.ge	1b
	stp1	A_l, A_h, dst, #16
	stp1	B_l, B_h, dst, #16
	stp1	C_l, C_h, dst, #16
	stp1	D_l, D_h, dst, #16

	tst	count, #0x3f
	b.ne	.Ltail63
.Lexitfunc:
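
The "Bytes to reach alignment" prologue above uses the neg/ands pair; the equivalent C idiom is a short sketch (hedged: names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Number of bytes from p up to the next 16-byte boundary (0 if aligned),
 * matching "neg tmp2, src; ands tmp2, tmp2, #15" above. */
static inline size_t bytes_to_align16(const void *p)
{
	return (size_t)(-(uintptr_t)p & 15);
}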
aixcc-public/challenge-001-exemplar-source
5,912
arch/arm64/lib/memcpy.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2012-2021, Arm Limited.
 *
 * Adapted from the original at:
 * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/memcpy.S
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/* Assumptions:
 *
 * ARMv8-a, AArch64, unaligned accesses.
 *
 */

#define L(label) .L ## label

#define dstin	x0
#define src	x1
#define count	x2
#define dst	x3
#define srcend	x4
#define dstend	x5
#define A_l	x6
#define A_lw	w6
#define A_h	x7
#define B_l	x8
#define B_lw	w8
#define B_h	x9
#define C_l	x10
#define C_lw	w10
#define C_h	x11
#define D_l	x12
#define D_h	x13
#define E_l	x14
#define E_h	x15
#define F_l	x16
#define F_h	x17
#define G_l	count
#define G_h	dst
#define H_l	src
#define H_h	srcend
#define tmp1	x14

/* This implementation handles overlaps and supports both memcpy and memmove
   from a single entry point.  It uses unaligned accesses and branchless
   sequences to keep the code small, simple and improve performance.

   Copies are split into 3 main cases: small copies of up to 32 bytes, medium
   copies of up to 128 bytes, and large copies.  The overhead of the overlap
   check is negligible since it is only required for large copies.

   Large copies use a software pipelined loop processing 64 bytes per iteration.
   The destination pointer is 16-byte aligned to minimize unaligned accesses.
   The loop tail is handled by always copying 64 bytes from the end.
*/

SYM_FUNC_START(__pi_memcpy)
	add	srcend, src, count
	add	dstend, dstin, count
	cmp	count, 128
	b.hi	L(copy_long)
	cmp	count, 32
	b.hi	L(copy32_128)

	/* Small copies: 0..32 bytes.  */
	cmp	count, 16
	b.lo	L(copy16)
	ldp	A_l, A_h, [src]
	ldp	D_l, D_h, [srcend, -16]
	stp	A_l, A_h, [dstin]
	stp	D_l, D_h, [dstend, -16]
	ret

	/* Copy 8-15 bytes.  */
L(copy16):
	tbz	count, 3, L(copy8)
	ldr	A_l, [src]
	ldr	A_h, [srcend, -8]
	str	A_l, [dstin]
	str	A_h, [dstend, -8]
	ret

	.p2align 3
	/* Copy 4-7 bytes.  */
L(copy8):
	tbz	count, 2, L(copy4)
	ldr	A_lw, [src]
	ldr	B_lw, [srcend, -4]
	str	A_lw, [dstin]
	str	B_lw, [dstend, -4]
	ret

	/* Copy 0..3 bytes using a branchless sequence.  */
L(copy4):
	cbz	count, L(copy0)
	lsr	tmp1, count, 1
	ldrb	A_lw, [src]
	ldrb	C_lw, [srcend, -1]
	ldrb	B_lw, [src, tmp1]
	strb	A_lw, [dstin]
	strb	B_lw, [dstin, tmp1]
	strb	C_lw, [dstend, -1]
L(copy0):
	ret

	.p2align 4
	/* Medium copies: 33..128 bytes.  */
L(copy32_128):
	ldp	A_l, A_h, [src]
	ldp	B_l, B_h, [src, 16]
	ldp	C_l, C_h, [srcend, -32]
	ldp	D_l, D_h, [srcend, -16]
	cmp	count, 64
	b.hi	L(copy128)
	stp	A_l, A_h, [dstin]
	stp	B_l, B_h, [dstin, 16]
	stp	C_l, C_h, [dstend, -32]
	stp	D_l, D_h, [dstend, -16]
	ret

	.p2align 4
	/* Copy 65..128 bytes.  */
L(copy128):
	ldp	E_l, E_h, [src, 32]
	ldp	F_l, F_h, [src, 48]
	cmp	count, 96
	b.ls	L(copy96)
	ldp	G_l, G_h, [srcend, -64]
	ldp	H_l, H_h, [srcend, -48]
	stp	G_l, G_h, [dstend, -64]
	stp	H_l, H_h, [dstend, -48]
L(copy96):
	stp	A_l, A_h, [dstin]
	stp	B_l, B_h, [dstin, 16]
	stp	E_l, E_h, [dstin, 32]
	stp	F_l, F_h, [dstin, 48]
	stp	C_l, C_h, [dstend, -32]
	stp	D_l, D_h, [dstend, -16]
	ret

	.p2align 4
	/* Copy more than 128 bytes.  */
L(copy_long):
	/* Use backwards copy if there is an overlap.  */
	sub	tmp1, dstin, src
	cbz	tmp1, L(copy0)
	cmp	tmp1, count
	b.lo	L(copy_long_backwards)

	/* Copy 16 bytes and then align dst to 16-byte alignment.  */

	ldp	D_l, D_h, [src]
	and	tmp1, dstin, 15
	bic	dst, dstin, 15
	sub	src, src, tmp1
	add	count, count, tmp1	/* Count is now 16 too large.  */
	ldp	A_l, A_h, [src, 16]
	stp	D_l, D_h, [dstin]
	ldp	B_l, B_h, [src, 32]
	ldp	C_l, C_h, [src, 48]
	ldp	D_l, D_h, [src, 64]!
	subs	count, count, 128 + 16	/* Test and readjust count.  */
	b.ls	L(copy64_from_end)

L(loop64):
	stp	A_l, A_h, [dst, 16]
	ldp	A_l, A_h, [src, 16]
	stp	B_l, B_h, [dst, 32]
	ldp	B_l, B_h, [src, 32]
	stp	C_l, C_h, [dst, 48]
	ldp	C_l, C_h, [src, 48]
	stp	D_l, D_h, [dst, 64]!
	ldp	D_l, D_h, [src, 64]!
	subs	count, count, 64
	b.hi	L(loop64)

	/* Write the last iteration and copy 64 bytes from the end.  */
L(copy64_from_end):
	ldp	E_l, E_h, [srcend, -64]
	stp	A_l, A_h, [dst, 16]
	ldp	A_l, A_h, [srcend, -48]
	stp	B_l, B_h, [dst, 32]
	ldp	B_l, B_h, [srcend, -32]
	stp	C_l, C_h, [dst, 48]
	ldp	C_l, C_h, [srcend, -16]
	stp	D_l, D_h, [dst, 64]
	stp	E_l, E_h, [dstend, -64]
	stp	A_l, A_h, [dstend, -48]
	stp	B_l, B_h, [dstend, -32]
	stp	C_l, C_h, [dstend, -16]
	ret

	.p2align 4

	/* Large backwards copy for overlapping copies.
	   Copy 16 bytes and then align dst to 16-byte alignment.  */
L(copy_long_backwards):
	ldp	D_l, D_h, [srcend, -16]
	and	tmp1, dstend, 15
	sub	srcend, srcend, tmp1
	sub	count, count, tmp1
	ldp	A_l, A_h, [srcend, -16]
	stp	D_l, D_h, [dstend, -16]
	ldp	B_l, B_h, [srcend, -32]
	ldp	C_l, C_h, [srcend, -48]
	ldp	D_l, D_h, [srcend, -64]!
	sub	dstend, dstend, tmp1
	subs	count, count, 128
	b.ls	L(copy64_from_start)

L(loop64_backwards):
	stp	A_l, A_h, [dstend, -16]
	ldp	A_l, A_h, [srcend, -16]
	stp	B_l, B_h, [dstend, -32]
	ldp	B_l, B_h, [srcend, -32]
	stp	C_l, C_h, [dstend, -48]
	ldp	C_l, C_h, [srcend, -48]
	stp	D_l, D_h, [dstend, -64]!
	ldp	D_l, D_h, [srcend, -64]!
	subs	count, count, 64
	b.hi	L(loop64_backwards)

	/* Write the last iteration and copy 64 bytes from the start.  */
L(copy64_from_start):
	ldp	G_l, G_h, [src, 48]
	stp	A_l, A_h, [dstend, -16]
	ldp	A_l, A_h, [src, 32]
	stp	B_l, B_h, [dstend, -32]
	ldp	B_l, B_h, [src, 16]
	stp	C_l, C_h, [dstend, -48]
	ldp	C_l, C_h, [src]
	stp	D_l, D_h, [dstend, -64]
	stp	G_l, G_h, [dstin, 48]
	stp	A_l, A_h, [dstin, 32]
	stp	B_l, B_h, [dstin, 16]
	stp	C_l, C_h, [dstin]
	ret
SYM_FUNC_END(__pi_memcpy)

SYM_FUNC_ALIAS(__memcpy, __pi_memcpy)
EXPORT_SYMBOL(__memcpy)
SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)

SYM_FUNC_ALIAS(__pi_memmove, __pi_memcpy)
SYM_FUNC_ALIAS(__memmove, __pi_memmove)
EXPORT_SYMBOL(__memmove)
SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
EXPORT_SYMBOL(memmove)
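
The L(copy4) branchless trick above generalizes nicely: for 1 <= n <= 3, writing bytes 0, n/2 and n-1 covers every length with exactly three stores. A minimal C sketch (names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Branchless-style 0..3 byte copy; loads happen before stores, so an
 * overlapping src/dst is also handled, as in the assembly above. */
static void copy_upto3(uint8_t *dst, const uint8_t *src, size_t n)
{
	if (n == 0)
		return;
	size_t mid = n >> 1;
	uint8_t a = src[0], b = src[mid], c = src[n - 1];
	dst[0] = a;
	dst[mid] = b;
	dst[n - 1] = c;
}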
aixcc-public/challenge-001-exemplar-source
2,858
arch/arm64/lib/crc32.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Accelerated CRC32(C) using AArch64 CRC instructions
 *
 * Copyright (C) 2016 - 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/alternative.h>
#include <asm/assembler.h>

	.arch	armv8-a+crc

	.macro	byteorder, reg, be
	.if	\be
CPU_LE( rev	\reg, \reg	)
	.else
CPU_BE( rev	\reg, \reg	)
	.endif
	.endm

	.macro	byteorder16, reg, be
	.if	\be
CPU_LE( rev16	\reg, \reg	)
	.else
CPU_BE( rev16	\reg, \reg	)
	.endif
	.endm

	.macro	bitorder, reg, be
	.if	\be
	rbit	\reg, \reg
	.endif
	.endm

	.macro	bitorder16, reg, be
	.if	\be
	rbit	\reg, \reg
	lsr	\reg, \reg, #16
	.endif
	.endm

	.macro	bitorder8, reg, be
	.if	\be
	rbit	\reg, \reg
	lsr	\reg, \reg, #24
	.endif
	.endm

	.macro	__crc32, c, be=0
	bitorder	w0, \be
	cmp	x2, #16
	b.lt	8f		// less than 16 bytes

	and	x7, x2, #0x1f
	and	x2, x2, #~0x1f
	cbz	x7, 32f		// multiple of 32 bytes

	and	x8, x7, #0xf
	ldp	x3, x4, [x1]
	add	x8, x8, x1
	add	x1, x1, x7
	ldp	x5, x6, [x8]
	byteorder	x3, \be
	byteorder	x4, \be
	byteorder	x5, \be
	byteorder	x6, \be
	bitorder	x3, \be
	bitorder	x4, \be
	bitorder	x5, \be
	bitorder	x6, \be

	tst	x7, #8
	crc32\c\()x	w8, w0, x3
	csel	x3, x3, x4, eq
	csel	w0, w0, w8, eq
	tst	x7, #4
	lsr	x4, x3, #32
	crc32\c\()w	w8, w0, w3
	csel	x3, x3, x4, eq
	csel	w0, w0, w8, eq
	tst	x7, #2
	lsr	w4, w3, #16
	crc32\c\()h	w8, w0, w3
	csel	w3, w3, w4, eq
	csel	w0, w0, w8, eq
	tst	x7, #1
	crc32\c\()b	w8, w0, w3
	csel	w0, w0, w8, eq
	tst	x7, #16
	crc32\c\()x	w8, w0, x5
	crc32\c\()x	w8, w8, x6
	csel	w0, w0, w8, eq
	cbz	x2, 0f

32:	ldp	x3, x4, [x1], #32
	sub	x2, x2, #32
	ldp	x5, x6, [x1, #-16]
	byteorder	x3, \be
	byteorder	x4, \be
	byteorder	x5, \be
	byteorder	x6, \be
	bitorder	x3, \be
	bitorder	x4, \be
	bitorder	x5, \be
	bitorder	x6, \be
	crc32\c\()x	w0, w0, x3
	crc32\c\()x	w0, w0, x4
	crc32\c\()x	w0, w0, x5
	crc32\c\()x	w0, w0, x6
	cbnz	x2, 32b
0:	bitorder	w0, \be
	ret

8:	tbz	x2, #3, 4f
	ldr	x3, [x1], #8
	byteorder	x3, \be
	bitorder	x3, \be
	crc32\c\()x	w0, w0, x3
4:	tbz	x2, #2, 2f
	ldr	w3, [x1], #4
	byteorder	w3, \be
	bitorder	w3, \be
	crc32\c\()w	w0, w0, w3
2:	tbz	x2, #1, 1f
	ldrh	w3, [x1], #2
	byteorder16	w3, \be
	bitorder16	w3, \be
	crc32\c\()h	w0, w0, w3
1:	tbz	x2, #0, 0f
	ldrb	w3, [x1]
	bitorder8	w3, \be
	crc32\c\()b	w0, w0, w3
0:	bitorder	w0, \be
	ret
	.endm

	.align	5
SYM_FUNC_START(crc32_le)
alternative_if_not ARM64_HAS_CRC32
	b	crc32_le_base
alternative_else_nop_endif
	__crc32
SYM_FUNC_END(crc32_le)

	.align	5
SYM_FUNC_START(__crc32c_le)
alternative_if_not ARM64_HAS_CRC32
	b	__crc32c_le_base
alternative_else_nop_endif
	__crc32	c
SYM_FUNC_END(__crc32c_le)

	.align	5
SYM_FUNC_START(crc32_be)
alternative_if_not ARM64_HAS_CRC32
	b	crc32_be_base
alternative_else_nop_endif
	__crc32	be=1
SYM_FUNC_END(crc32_be)
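
For reference, the operation the crc32x/crc32w/crc32h/crc32b instructions above accelerate is the standard reflected CRC32 update. A hedged bit-serial C sketch (illustrative name; to my understanding crc32_le applies no pre/post inversion itself, so callers conventionally pass ~0 as the seed and invert the result):

#include <stddef.h>
#include <stdint.h>

/* Reflected CRC32 (polynomial 0xedb88320), bit-at-a-time. */
static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return crc;
}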
aixcc-public/challenge-001-exemplar-source
1,054
arch/arm64/lib/tishift.S
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
 *
 * Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

SYM_FUNC_START(__ashlti3)
	cbz	x2, 1f
	mov	x3, #64
	sub	x3, x3, x2
	cmp	x3, #0
	b.le	2f
	lsl	x1, x1, x2
	lsr	x3, x0, x3
	lsl	x2, x0, x2
	orr	x1, x1, x3
	mov	x0, x2
1:
	ret
2:
	neg	w1, w3
	mov	x2, #0
	lsl	x1, x0, x1
	mov	x0, x2
	ret
SYM_FUNC_END(__ashlti3)
EXPORT_SYMBOL(__ashlti3)

SYM_FUNC_START(__ashrti3)
	cbz	x2, 1f
	mov	x3, #64
	sub	x3, x3, x2
	cmp	x3, #0
	b.le	2f
	lsr	x0, x0, x2
	lsl	x3, x1, x3
	asr	x2, x1, x2
	orr	x0, x0, x3
	mov	x1, x2
1:
	ret
2:
	neg	w0, w3
	asr	x2, x1, #63
	asr	x0, x1, x0
	mov	x1, x2
	ret
SYM_FUNC_END(__ashrti3)
EXPORT_SYMBOL(__ashrti3)

SYM_FUNC_START(__lshrti3)
	cbz	x2, 1f
	mov	x3, #64
	sub	x3, x3, x2
	cmp	x3, #0
	b.le	2f
	lsr	x0, x0, x2
	lsl	x3, x1, x3
	lsr	x2, x1, x2
	orr	x0, x0, x3
	mov	x1, x2
1:
	ret
2:
	neg	w0, w3
	mov	x2, #0
	lsr	x0, x1, x0
	mov	x1, x2
	ret
SYM_FUNC_END(__lshrti3)
EXPORT_SYMBOL(__lshrti3)
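
These helpers implement 128-bit shifts for the compiler (value in x0 = low half, x1 = high half). The two cases in __ashlti3 map onto this C sketch (struct name is illustrative):

#include <stdint.h>

struct u128 { uint64_t lo, hi; };

/* 128-bit logical left shift, the semantics of __ashlti3 above. */
static struct u128 shl128(struct u128 v, unsigned s)
{
	struct u128 r;
	if (s == 0)
		return v;
	if (s < 64) {			/* bits spill from lo into hi */
		r.hi = (v.hi << s) | (v.lo >> (64 - s));
		r.lo = v.lo << s;
	} else {			/* 64 <= s < 128: lo moves wholesale */
		r.hi = v.lo << (s - 64);
		r.lo = 0;
	}
	return r;
}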
aixcc-public/challenge-001-exemplar-source
2,110
arch/arm64/lib/kasan_sw_tags.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Google LLC
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/*
 * Report a tag mismatch detected by tag-based KASAN.
 *
 * A compiler-generated thunk calls this with a non-AAPCS calling
 * convention. Upon entry to this function, registers are as follows:
 *
 * x0:         fault address (see below for restore)
 * x1:         fault description (see below for restore)
 * x2 to x15:  callee-saved
 * x16 to x17: safe to clobber
 * x18 to x30: callee-saved
 * sp:         pre-decremented by 256 bytes (see below for restore)
 *
 * The caller has decremented the SP by 256 bytes, and created a
 * structure on the stack as follows:
 *
 * sp + 0..15:    x0 and x1 to be restored
 * sp + 16..231:  free for use
 * sp + 232..247: x29 and x30 (same as in GPRs)
 * sp + 248..255: free for use
 *
 * Note that this is not a struct pt_regs.
 *
 * To call a regular AAPCS function we must save x2 to x15 (which we can
 * store in the gaps), and create a frame record (for which we can use
 * x29 and x30 spilled by the caller as those match the GPRs).
 *
 * The caller expects x0 and x1 to be restored from the structure, and
 * for the structure to be removed from the stack (i.e. the SP must be
 * incremented by 256 prior to return).
 */
SYM_CODE_START(__hwasan_tag_mismatch)
	bti	c
	add	x29, sp, #232
	stp	x2, x3, [sp, #8 * 2]
	stp	x4, x5, [sp, #8 * 4]
	stp	x6, x7, [sp, #8 * 6]
	stp	x8, x9, [sp, #8 * 8]
	stp	x10, x11, [sp, #8 * 10]
	stp	x12, x13, [sp, #8 * 12]
	stp	x14, x15, [sp, #8 * 14]
#ifndef CONFIG_SHADOW_CALL_STACK
	str	x18, [sp, #8 * 18]
#endif

	mov	x2, x30
	bl	kasan_tag_mismatch

	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #8 * 2]
	ldp	x4, x5, [sp, #8 * 4]
	ldp	x6, x7, [sp, #8 * 6]
	ldp	x8, x9, [sp, #8 * 8]
	ldp	x10, x11, [sp, #8 * 10]
	ldp	x12, x13, [sp, #8 * 12]
	ldp	x14, x15, [sp, #8 * 14]
#ifndef CONFIG_SHADOW_CALL_STACK
	ldr	x18, [sp, #8 * 18]
#endif
	ldp	x29, x30, [sp, #8 * 29]

	/* remove the structure from the stack */
	add	sp, sp, #256
	ret
SYM_CODE_END(__hwasan_tag_mismatch)
EXPORT_SYMBOL(__hwasan_tag_mismatch)
aixcc-public/challenge-001-exemplar-source
1,748
arch/arm64/lib/copy_page.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>

/*
 * Copy a page from src to dest (both are page aligned)
 *
 * Parameters:
 *	x0 - dest
 *	x1 - src
 */
SYM_FUNC_START(__pi_copy_page)
alternative_if ARM64_HAS_NO_HW_PREFETCH
	// Prefetch three cache lines ahead.
	prfm	pldl1strm, [x1, #128]
	prfm	pldl1strm, [x1, #256]
	prfm	pldl1strm, [x1, #384]
alternative_else_nop_endif

	ldp	x2, x3, [x1]
	ldp	x4, x5, [x1, #16]
	ldp	x6, x7, [x1, #32]
	ldp	x8, x9, [x1, #48]
	ldp	x10, x11, [x1, #64]
	ldp	x12, x13, [x1, #80]
	ldp	x14, x15, [x1, #96]
	ldp	x16, x17, [x1, #112]

	add	x0, x0, #256
	add	x1, x1, #128
1:
	tst	x0, #(PAGE_SIZE - 1)

alternative_if ARM64_HAS_NO_HW_PREFETCH
	prfm	pldl1strm, [x1, #384]
alternative_else_nop_endif

	stnp	x2, x3, [x0, #-256]
	ldp	x2, x3, [x1]
	stnp	x4, x5, [x0, #16 - 256]
	ldp	x4, x5, [x1, #16]
	stnp	x6, x7, [x0, #32 - 256]
	ldp	x6, x7, [x1, #32]
	stnp	x8, x9, [x0, #48 - 256]
	ldp	x8, x9, [x1, #48]
	stnp	x10, x11, [x0, #64 - 256]
	ldp	x10, x11, [x1, #64]
	stnp	x12, x13, [x0, #80 - 256]
	ldp	x12, x13, [x1, #80]
	stnp	x14, x15, [x0, #96 - 256]
	ldp	x14, x15, [x1, #96]
	stnp	x16, x17, [x0, #112 - 256]
	ldp	x16, x17, [x1, #112]

	add	x0, x0, #128
	add	x1, x1, #128

	b.ne	1b

	stnp	x2, x3, [x0, #-256]
	stnp	x4, x5, [x0, #16 - 256]
	stnp	x6, x7, [x0, #32 - 256]
	stnp	x8, x9, [x0, #48 - 256]
	stnp	x10, x11, [x0, #64 - 256]
	stnp	x12, x13, [x0, #80 - 256]
	stnp	x14, x15, [x0, #96 - 256]
	stnp	x16, x17, [x0, #112 - 256]

	ret
SYM_FUNC_END(__pi_copy_page)
SYM_FUNC_ALIAS(copy_page, __pi_copy_page)
EXPORT_SYMBOL(copy_page)
aixcc-public/challenge-001-exemplar-source
3,730
arch/arm64/lib/mte.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/linkage.h>
#include <asm/asm-uaccess.h>
#include <asm/assembler.h>
#include <asm/mte.h>
#include <asm/page.h>
#include <asm/sysreg.h>

	.arch	armv8.5-a+memtag

/*
 * multitag_transfer_size - set \reg to the block size that is accessed by the
 * LDGM/STGM instructions.
 */
	.macro	multitag_transfer_size, reg, tmp
	mrs_s	\reg, SYS_GMID_EL1
	ubfx	\reg, \reg, #GMID_EL1_BS_SHIFT, #GMID_EL1_BS_SIZE
	mov	\tmp, #4
	lsl	\reg, \tmp, \reg
	.endm

/*
 * Clear the tags in a page
 *   x0 - address of the page to be cleared
 */
SYM_FUNC_START(mte_clear_page_tags)
	multitag_transfer_size x1, x2
1:	stgm	xzr, [x0]
	add	x0, x0, x1
	tst	x0, #(PAGE_SIZE - 1)
	b.ne	1b
	ret
SYM_FUNC_END(mte_clear_page_tags)

/*
 * Zero the page and tags at the same time
 *
 * Parameters:
 *	x0 - address to the beginning of the page
 */
SYM_FUNC_START(mte_zero_clear_page_tags)
	and	x0, x0, #(1 << MTE_TAG_SHIFT) - 1	// clear the tag
	mrs	x1, dczid_el0
	tbnz	x1, #4, 2f	// Branch if DC GZVA is prohibited
	and	w1, w1, #0xf
	mov	x2, #4
	lsl	x1, x2, x1

1:	dc	gzva, x0
	add	x0, x0, x1
	tst	x0, #(PAGE_SIZE - 1)
	b.ne	1b
	ret

2:	stz2g	x0, [x0], #(MTE_GRANULE_SIZE * 2)
	tst	x0, #(PAGE_SIZE - 1)
	b.ne	2b
	ret
SYM_FUNC_END(mte_zero_clear_page_tags)

/*
 * Copy the tags from the source page to the destination one
 *   x0 - address of the destination page
 *   x1 - address of the source page
 */
SYM_FUNC_START(mte_copy_page_tags)
	mov	x2, x0
	mov	x3, x1
	multitag_transfer_size x5, x6
1:	ldgm	x4, [x3]
	stgm	x4, [x2]
	add	x2, x2, x5
	add	x3, x3, x5
	tst	x2, #(PAGE_SIZE - 1)
	b.ne	1b
	ret
SYM_FUNC_END(mte_copy_page_tags)

/*
 * Read tags from a user buffer (one tag per byte) and set the corresponding
 * tags at the given kernel address. Used by PTRACE_POKEMTETAGS.
 *   x0 - kernel address (to)
 *   x1 - user buffer (from)
 *   x2 - number of tags/bytes (n)
 * Returns:
 *   x0 - number of tags read/set
 */
SYM_FUNC_START(mte_copy_tags_from_user)
	mov	x3, x1
	cbz	x2, 2f
1:
USER(2f, ldtrb	w4, [x1])
	lsl	x4, x4, #MTE_TAG_SHIFT
	stg	x4, [x0], #MTE_GRANULE_SIZE
	add	x1, x1, #1
	subs	x2, x2, #1
	b.ne	1b

	// exception handling and function return
2:	sub	x0, x1, x3		// update the number of tags set
	ret
SYM_FUNC_END(mte_copy_tags_from_user)

/*
 * Get the tags from a kernel address range and write the tag values to the
 * given user buffer (one tag per byte). Used by PTRACE_PEEKMTETAGS.
 *   x0 - user buffer (to)
 *   x1 - kernel address (from)
 *   x2 - number of tags/bytes (n)
 * Returns:
 *   x0 - number of tags read/set
 */
SYM_FUNC_START(mte_copy_tags_to_user)
	mov	x3, x0
	cbz	x2, 2f
1:
	ldg	x4, [x1]
	ubfx	x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
USER(2f, sttrb	w4, [x0])
	add	x0, x0, #1
	add	x1, x1, #MTE_GRANULE_SIZE
	subs	x2, x2, #1
	b.ne	1b

	// exception handling and function return
2:	sub	x0, x0, x3		// update the number of tags copied
	ret
SYM_FUNC_END(mte_copy_tags_to_user)

/*
 * Save the tags in a page
 *   x0 - page address
 *   x1 - tag storage, MTE_PAGE_TAG_STORAGE bytes
 */
SYM_FUNC_START(mte_save_page_tags)
	multitag_transfer_size x7, x5
1:	mov	x2, #0
2:	ldgm	x5, [x0]
	orr	x2, x2, x5
	add	x0, x0, x7
	tst	x0, #0xFF		// 16 tag values fit in a register,
	b.ne	2b			// which is 16*16=256 bytes
	str	x2, [x1], #8
	tst	x0, #(PAGE_SIZE - 1)
	b.ne	1b
	ret
SYM_FUNC_END(mte_save_page_tags)

/*
 * Restore the tags in a page
 *   x0 - page address
 *   x1 - tag storage, MTE_PAGE_TAG_STORAGE bytes
 */
SYM_FUNC_START(mte_restore_page_tags)
	multitag_transfer_size x7, x5
1:	ldr	x2, [x1], #8
2:	stgm	x2, [x0]
	add	x0, x0, x7
	tst	x0, #0xFF
	b.ne	2b
	tst	x0, #(PAGE_SIZE - 1)
	b.ne	1b
	ret
SYM_FUNC_END(mte_restore_page_tags)
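
The lsl/stg and ldg/ubfx pairs above rely on the MTE convention that a 4-bit allocation tag rides in the otherwise-unused top byte of a pointer. A hedged C sketch of that packing, assuming MTE_TAG_SHIFT is 56 as the asm suggests (helper name is illustrative):

#include <stdint.h>

#define MTE_TAG_SHIFT_C	56	/* assumption: matches MTE_TAG_SHIFT above */

/* Replace the tag bits in an address with a new 4-bit tag. */
static inline uint64_t mte_set_ptr_tag(uint64_t addr, uint64_t tag)
{
	addr &= ~(0xfULL << MTE_TAG_SHIFT_C);	/* clear old tag bits */
	return addr | ((tag & 0xf) << MTE_TAG_SHIFT_C);
}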
aixcc-public/challenge-001-exemplar-source
9,205
arch/arm64/lib/strncmp.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2013-2022, Arm Limited.
 *
 * Adapted from the original at:
 * https://github.com/ARM-software/optimized-routines/blob/189dfefe37d54c5b/string/aarch64/strncmp.S
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/* Assumptions:
 *
 * ARMv8-a, AArch64.
 * MTE compatible.
 */

#define L(label) .L ## label

#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f

/* Parameters and result.  */
#define src1		x0
#define src2		x1
#define limit		x2
#define result		x0

/* Internal variables.  */
#define data1		x3
#define data1w		w3
#define data2		x4
#define data2w		w4
#define has_nul		x5
#define diff		x6
#define syndrome	x7
#define tmp1		x8
#define tmp2		x9
#define tmp3		x10
#define zeroones	x11
#define pos		x12
#define mask		x13
#define endloop		x14
#define count		mask
#define offset		pos
#define neg_offset	x15

/* Define endian dependent shift operations.
   On big-endian early bytes are at MSB and on little-endian LSB.
   LS_FW means shifting towards early bytes.
   LS_BK means shifting towards later bytes.
 */
#ifdef __AARCH64EB__
#define LS_FW lsl
#define LS_BK lsr
#else
#define LS_FW lsr
#define LS_BK lsl
#endif

SYM_FUNC_START(__pi_strncmp)
	cbz	limit, L(ret0)
	eor	tmp1, src1, src2
	mov	zeroones, #REP8_01
	tst	tmp1, #7
	and	count, src1, #7
	b.ne	L(misaligned8)
	cbnz	count, L(mutual_align)

	/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
	   (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
	   can be done in parallel across the entire word.  */
	.p2align 4
L(loop_aligned):
	ldr	data1, [src1], #8
	ldr	data2, [src2], #8
L(start_realigned):
	subs	limit, limit, #8
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, #REP8_7f
	eor	diff, data1, data2	/* Non-zero if differences found.  */
	csinv	endloop, diff, xzr, hi	/* Last Dword or differences.  */
	bics	has_nul, tmp1, tmp2	/* Non-zero if NUL terminator.  */
	ccmp	endloop, #0, #0, eq
	b.eq	L(loop_aligned)
	/* End of main loop */

L(full_check):
#ifndef __AARCH64EB__
	orr	syndrome, diff, has_nul
	add	limit, limit, 8	/* Rewind limit to before last subs. */
L(syndrome_check):
	/* Limit was reached.  Check if the NUL byte or the difference
	   is before the limit. */
	rev	syndrome, syndrome
	rev	data1, data1
	clz	pos, syndrome
	rev	data2, data2
	lsl	data1, data1, pos
	cmp	limit, pos, lsr #3
	lsl	data2, data2, pos
	/* But we need to zero-extend (char is unsigned) the value and then
	   perform a signed 32-bit subtraction.  */
	lsr	data1, data1, #56
	sub	result, data1, data2, lsr #56
	csel	result, result, xzr, hi
	ret
#else
	/* Not reached the limit, must have found the end or a diff.  */
	tbz	limit, #63, L(not_limit)
	add	tmp1, limit, 8
	cbz	limit, L(not_limit)
	lsl	limit, tmp1, #3	/* Bits -> bytes.  */
	mov	mask, #~0
	lsr	mask, mask, limit
	bic	data1, data1, mask
	bic	data2, data2, mask

	/* Make sure that the NUL byte is marked in the syndrome.  */
	orr	has_nul, has_nul, mask

L(not_limit):
	/* For big-endian we cannot use the trick with the syndrome value
	   as carry-propagation can corrupt the upper bits if the trailing
	   bytes in the string contain 0x01.  */
	/* However, if there is no NUL byte in the dword, we can generate
	   the result directly.  We can't just subtract the bytes as the
	   MSB might be significant.  */
	cbnz	has_nul, 1f
	cmp	data1, data2
	cset	result, ne
	cneg	result, result, lo
	ret
1:
	/* Re-compute the NUL-byte detection, using a byte-reversed value.  */
	rev	tmp3, data1
	sub	tmp1, tmp3, zeroones
	orr	tmp2, tmp3, #REP8_7f
	bic	has_nul, tmp1, tmp2
	rev	has_nul, has_nul
	orr	syndrome, diff, has_nul
	clz	pos, syndrome
	/* The most-significant-non-zero bit of the syndrome marks either the
	   first bit that is different, or the top bit of the first zero byte.
	   Shifting left now will bring the critical information into the
	   top bits.  */
L(end_quick):
	lsl	data1, data1, pos
	lsl	data2, data2, pos
	/* But we need to zero-extend (char is unsigned) the value and then
	   perform a signed 32-bit subtraction.  */
	lsr	data1, data1, #56
	sub	result, data1, data2, lsr #56
	ret
#endif

L(mutual_align):
	/* Sources are mutually aligned, but are not currently at an
	   alignment boundary.  Round down the addresses and then mask off
	   the bytes that precede the start point.
	   We also need to adjust the limit calculations, but without
	   overflowing if the limit is near ULONG_MAX.  */
	bic	src1, src1, #7
	bic	src2, src2, #7
	ldr	data1, [src1], #8
	neg	tmp3, count, lsl #3	/* 64 - bits(bytes beyond align). */
	ldr	data2, [src2], #8
	mov	tmp2, #~0
	LS_FW	tmp2, tmp2, tmp3	/* Shift (count & 63).  */
	/* Adjust the limit and ensure it doesn't overflow.  */
	adds	limit, limit, count
	csinv	limit, limit, xzr, lo
	orr	data1, data1, tmp2
	orr	data2, data2, tmp2
	b	L(start_realigned)

	.p2align 4
	/* Don't bother with dwords for up to 16 bytes.  */
L(misaligned8):
	cmp	limit, #16
	b.hs	L(try_misaligned_words)

L(byte_loop):
	/* Perhaps we can do better than this.  */
	ldrb	data1w, [src1], #1
	ldrb	data2w, [src2], #1
	subs	limit, limit, #1
	ccmp	data1w, #1, #0, hi	/* NZCV = 0b0000.  */
	ccmp	data1w, data2w, #0, cs	/* NZCV = 0b0000.  */
	b.eq	L(byte_loop)
L(done):
	sub	result, data1, data2
	ret

	/* Align the SRC1 to a dword by doing a bytewise compare and then do
	   the dword loop.  */
L(try_misaligned_words):
	cbz	count, L(src1_aligned)

	neg	count, count
	and	count, count, #7
	sub	limit, limit, count

L(page_end_loop):
	ldrb	data1w, [src1], #1
	ldrb	data2w, [src2], #1
	cmp	data1w, #1
	ccmp	data1w, data2w, #0, cs	/* NZCV = 0b0000.  */
	b.ne	L(done)
	subs	count, count, #1
	b.hi	L(page_end_loop)

	/* The following diagram explains the comparison of misaligned strings.
	   The bytes are shown in natural order. For little-endian, it is
	   reversed in the registers. The "x" bytes are before the string.
	   The "|" separates data that is loaded at one time.
	   src1     | a a a a a a a a | b b b c c c c c | . . .
	   src2     | x x x x x a a a   a a a a a b b b | c c c c c . . .

	   After shifting in each step, the data looks like this:
	                STEP_A              STEP_B              STEP_C
	   data1    a a a a a a a a     b b b c c c c c     b b b c c c c c
	   data2    a a a a a a a a     b b b 0 0 0 0 0     0 0 0 c c c c c

	   The bytes with "0" are eliminated from the syndrome via mask.

	   Align SRC2 down to 16 bytes. This way we can read 16 bytes at a
	   time from SRC2. The comparison happens in 3 steps. After each step
	   the loop can exit, or read from SRC1 or SRC2. */
L(src1_aligned):
	/* Calculate offset from 8 byte alignment to string start in bits. No
	   need to mask offset since shifts are ignoring upper bits. */
	lsl	offset, src2, #3
	bic	src2, src2, #0xf
	mov	mask, -1
	neg	neg_offset, offset
	ldr	data1, [src1], #8
	ldp	tmp1, tmp2, [src2], #16
	LS_BK	mask, mask, neg_offset
	and	neg_offset, neg_offset, #63	/* Need actual value for cmp later. */
	/* Skip the first compare if data in tmp1 is irrelevant. */
	tbnz	offset, 6, L(misaligned_mid_loop)

L(loop_misaligned):
	/* STEP_A: Compare full 8 bytes when there is enough data from SRC2.*/
	LS_FW	data2, tmp1, offset
	LS_BK	tmp1, tmp2, neg_offset
	subs	limit, limit, #8
	orr	data2, data2, tmp1	/* 8 bytes from SRC2 combined from two regs.*/
	sub	has_nul, data1, zeroones
	eor	diff, data1, data2	/* Non-zero if differences found.  */
	orr	tmp3, data1, #REP8_7f
	csinv	endloop, diff, xzr, hi	/* If limit, set to all ones. */
	bic	has_nul, has_nul, tmp3	/* Non-zero if NUL byte found in SRC1. */
	orr	tmp3, endloop, has_nul
	cbnz	tmp3, L(full_check)

	ldr	data1, [src1], #8
L(misaligned_mid_loop):
	/* STEP_B: Compare first part of data1 to second part of tmp2. */
	LS_FW	data2, tmp2, offset
#ifdef __AARCH64EB__
	/* For big-endian we do a byte reverse to avoid carry-propagation
	   problem described above. This way we can reuse the has_nul in the
	   next step and also use syndrome value trick at the end. */
	rev	tmp3, data1
	#define data1_fixed tmp3
#else
	#define data1_fixed data1
#endif
	sub	has_nul, data1_fixed, zeroones
	orr	tmp3, data1_fixed, #REP8_7f
	eor	diff, data2, data1	/* Non-zero if differences found.  */
	bic	has_nul, has_nul, tmp3	/* Non-zero if NUL terminator.  */
#ifdef __AARCH64EB__
	rev	has_nul, has_nul
#endif
	cmp	limit, neg_offset, lsr #3
	orr	syndrome, diff, has_nul
	bic	syndrome, syndrome, mask	/* Ignore later bytes. */
	csinv	tmp3, syndrome, xzr, hi	/* If limit, set to all ones. */
	cbnz	tmp3, L(syndrome_check)

	/* STEP_C: Compare second part of data1 to first part of tmp1. */
	ldp	tmp1, tmp2, [src2], #16
	cmp	limit, #8
	LS_BK	data2, tmp1, neg_offset
	eor	diff, data2, data1	/* Non-zero if differences found.  */
	orr	syndrome, diff, has_nul
	and	syndrome, syndrome, mask	/* Ignore earlier bytes. */
	csinv	tmp3, syndrome, xzr, hi	/* If limit, set to all ones. */
	cbnz	tmp3, L(syndrome_check)

	ldr	data1, [src1], #8
	sub	limit, limit, #8
	b	L(loop_misaligned)

#ifdef __AARCH64EB__
L(syndrome_check):
	clz	pos, syndrome
	cmp	pos, limit, lsl #3
	b.lo	L(end_quick)
#endif

L(ret0):
	mov	result, #0
	ret
SYM_FUNC_END(__pi_strncmp)
SYM_FUNC_ALIAS_WEAK(strncmp, __pi_strncmp)
EXPORT_SYMBOL_NOKASAN(strncmp)
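
The NUL-detection identity the loops above rely on, (X - 1) & ~(X | 0x7f) per byte, is easy to check in C. A minimal sketch of the same SWAR test:

#include <stdint.h>

#define REP8_01_C 0x0101010101010101ULL
#define REP8_7F_C 0x7f7f7f7f7f7f7f7fULL

/* Nonzero iff some byte of x is zero, evaluated on all 8 bytes at once
 * (the bics has_nul computation in the aligned loop above). */
static inline uint64_t has_zero_byte(uint64_t x)
{
	return (x - REP8_01_C) & ~(x | REP8_7F_C);
}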
aixcc-public/challenge-001-exemplar-source
4,537
arch/arm64/lib/memset.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro * be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/cache.h> /* * Fill in the buffer with character c (alignment handled by the hardware) * * Parameters: * x0 - buf * x1 - c * x2 - n * Returns: * x0 - buf */ dstin .req x0 val .req w1 count .req x2 tmp1 .req x3 tmp1w .req w3 tmp2 .req x4 tmp2w .req w4 zva_len_x .req x5 zva_len .req w5 zva_bits_x .req x6 A_l .req x7 A_lw .req w7 dst .req x8 tmp3w .req w9 tmp3 .req x9 SYM_FUNC_START(__pi_memset) mov dst, dstin /* Preserve return value. */ and A_lw, val, #255 orr A_lw, A_lw, A_lw, lsl #8 orr A_lw, A_lw, A_lw, lsl #16 orr A_l, A_l, A_l, lsl #32 cmp count, #15 b.hi .Lover16_proc /*All store maybe are non-aligned..*/ tbz count, #3, 1f str A_l, [dst], #8 1: tbz count, #2, 2f str A_lw, [dst], #4 2: tbz count, #1, 3f strh A_lw, [dst], #2 3: tbz count, #0, 4f strb A_lw, [dst] 4: ret .Lover16_proc: /*Whether the start address is aligned with 16.*/ neg tmp2, dst ands tmp2, tmp2, #15 b.eq .Laligned /* * The count is not less than 16, we can use stp to store the start 16 bytes, * then adjust the dst aligned with 16.This process will make the current * memory address at alignment boundary. */ stp A_l, A_l, [dst] /*non-aligned store..*/ /*make the dst aligned..*/ sub count, count, tmp2 add dst, dst, tmp2 .Laligned: cbz A_l, .Lzero_mem .Ltail_maybe_long: cmp count, #64 b.ge .Lnot_short .Ltail63: ands tmp1, count, #0x30 b.eq 3f cmp tmp1w, #0x20 b.eq 1f b.lt 2f stp A_l, A_l, [dst], #16 1: stp A_l, A_l, [dst], #16 2: stp A_l, A_l, [dst], #16 /* * The last store length is less than 16,use stp to write last 16 bytes. * It will lead some bytes written twice and the access is non-aligned. */ 3: ands count, count, #15 cbz count, 4f add dst, dst, count stp A_l, A_l, [dst, #-16] /* Repeat some/all of last store. */ 4: ret /* * Critical loop. Start at a new cache line boundary. Assuming * 64 bytes per line, this ensures the entire loop is in one line. */ .p2align L1_CACHE_SHIFT .Lnot_short: sub dst, dst, #16/* Pre-bias. */ sub count, count, #64 1: stp A_l, A_l, [dst, #16] stp A_l, A_l, [dst, #32] stp A_l, A_l, [dst, #48] stp A_l, A_l, [dst, #64]! subs count, count, #64 b.ge 1b tst count, #0x3f add dst, dst, #16 b.ne .Ltail63 .Lexitfunc: ret /* * For zeroing memory, check to see if we can use the ZVA feature to * zero entire 'cache' lines. */ .Lzero_mem: cmp count, #63 b.le .Ltail63 /* * For zeroing small amounts of memory, it's not worth setting up * the line-clear code. */ cmp count, #128 b.lt .Lnot_short /*count is at least 128 bytes*/ mrs tmp1, dczid_el0 tbnz tmp1, #4, .Lnot_short mov tmp3w, #4 and zva_len, tmp1w, #15 /* Safety: other bits reserved. */ lsl zva_len, tmp3w, zva_len ands tmp3w, zva_len, #63 /* * ensure the zva_len is not less than 64. * It is not meaningful to use ZVA if the block size is less than 64. */ b.ne .Lnot_short .Lzero_by_line: /* * Compute how far we need to go to become suitably aligned. We're * already at quad-word alignment. */ cmp count, zva_len_x b.lt .Lnot_short /* Not enough to reach alignment. */ sub zva_bits_x, zva_len_x, #1 neg tmp2, dst ands tmp2, tmp2, zva_bits_x b.eq 2f /* Already aligned. 
*/ /* Not aligned; check that there's enough to copy after alignment. */ sub tmp1, count, tmp2 /* * Guarantee that the length remaining for ZVA is larger than 64, * so that the loop at 2f cannot run past the end of the memory range. */ cmp tmp1, #64 ccmp tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */ b.lt .Lnot_short /* * We know that there's at least 64 bytes to zero and that it's safe * to overrun by 64 bytes. */ mov count, tmp1 1: stp A_l, A_l, [dst] stp A_l, A_l, [dst, #16] stp A_l, A_l, [dst, #32] subs tmp2, tmp2, #64 stp A_l, A_l, [dst, #48] add dst, dst, #64 b.ge 1b /* We've overrun a bit, so adjust dst downwards. */ add dst, dst, tmp2 2: sub count, count, zva_len_x 3: dc zva, dst add dst, dst, zva_len_x subs count, count, zva_len_x b.ge 3b ands count, count, zva_bits_x b.ne .Ltail_maybe_long ret SYM_FUNC_END(__pi_memset) SYM_FUNC_ALIAS(__memset, __pi_memset) EXPORT_SYMBOL(__memset) SYM_FUNC_ALIAS_WEAK(memset, __pi_memset) EXPORT_SYMBOL(memset)
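The .Lzero_mem path above decides whether DC ZVA is usable by decoding DCZID_EL0: bit 4 (DZP) prohibits the instruction, and bits 3:0 give log2 of the block size in 4-byte words. A C sketch of the same decode (assumption: an AArch64 host, where DCZID_EL0 is architected to be readable from EL0):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t dczid;
    __asm__ volatile("mrs %0, dczid_el0" : "=r"(dczid));

    if (dczid & (1u << 4)) {            /* DZP bit: DC ZVA prohibited */
        puts("DC ZVA not usable");
    } else {
        /* BS field (bits 3:0) is log2(words); 4 bytes per word, hence
         * "mov tmp3w, #4; lsl zva_len, tmp3w, zva_len" above. */
        unsigned long zva_len = 4ul << (dczid & 0xf);
        printf("DC ZVA block: %lu bytes\n", zva_len);
    }
    return 0;
}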
aixcc-public/challenge-001-exemplar-source
1,407
arch/arm64/lib/copy_from_user.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 ARM Ltd. */ #include <linux/linkage.h> #include <asm/asm-uaccess.h> #include <asm/assembler.h> #include <asm/cache.h> /* * Copy from user space to a kernel buffer (alignment handled by the hardware) * * Parameters: * x0 - to * x1 - from * x2 - n * Returns: * x0 - bytes not copied */ .macro ldrb1 reg, ptr, val user_ldst 9998f, ldtrb, \reg, \ptr, \val .endm .macro strb1 reg, ptr, val strb \reg, [\ptr], \val .endm .macro ldrh1 reg, ptr, val user_ldst 9997f, ldtrh, \reg, \ptr, \val .endm .macro strh1 reg, ptr, val strh \reg, [\ptr], \val .endm .macro ldr1 reg, ptr, val user_ldst 9997f, ldtr, \reg, \ptr, \val .endm .macro str1 reg, ptr, val str \reg, [\ptr], \val .endm .macro ldp1 reg1, reg2, ptr, val user_ldp 9997f, \reg1, \reg2, \ptr, \val .endm .macro stp1 reg1, reg2, ptr, val stp \reg1, \reg2, [\ptr], \val .endm end .req x5 srcin .req x15 SYM_FUNC_START(__arch_copy_from_user) add end, x0, x2 mov srcin, x1 #include "copy_template.S" mov x0, #0 // Nothing to copy ret // Exception fixups 9997: cmp dst, dstin b.ne 9998f // Before being absolutely sure we couldn't copy anything, try harder USER(9998f, ldtrb tmp1w, [srcin]) strb tmp1w, [dst], #1 9998: sub x0, end, dst // bytes not copied ret SYM_FUNC_END(__arch_copy_from_user) EXPORT_SYMBOL(__arch_copy_from_user)
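The fixup labels above implement the usual arch contract: the return value is the number of bytes left uncopied, which callers almost always collapse into -EFAULT. A hypothetical kernel-side caller showing that convention (the struct and function names are invented here for illustration):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical request layout, for illustration only. */
struct example_req {
	u32 flags;
	u64 addr;
};

/* Pull a fixed-size request in from userspace. */
static long fetch_request(struct example_req *kreq, const void __user *uptr)
{
	if (copy_from_user(kreq, uptr, sizeof(*kreq)))
		return -EFAULT;	/* non-zero return: tail bytes were not copied */
	return 0;
}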
aixcc-public/challenge-001-exemplar-source
1,411
arch/arm64/lib/memchr.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2021 Arm Ltd. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * Find a character in an area of memory. * * Parameters: * x0 - buf * x1 - c * x2 - n * Returns: * x0 - address of first occurrence of 'c' or 0 */ #define L(label) .L ## label #define REP8_01 0x0101010101010101 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define srcin x0 #define chrin w1 #define cntin x2 #define result x0 #define wordcnt x3 #define rep01 x4 #define repchr x5 #define cur_word x6 #define cur_byte w6 #define tmp x7 #define tmp2 x8 .p2align 4 nop SYM_FUNC_START(__pi_memchr) and chrin, chrin, #0xff lsr wordcnt, cntin, #3 cbz wordcnt, L(byte_loop) mov rep01, #REP8_01 mul repchr, x1, rep01 and cntin, cntin, #7 L(word_loop): ldr cur_word, [srcin], #8 sub wordcnt, wordcnt, #1 eor cur_word, cur_word, repchr sub tmp, cur_word, rep01 orr tmp2, cur_word, #REP8_7f bics tmp, tmp, tmp2 b.ne L(found_word) cbnz wordcnt, L(word_loop) L(byte_loop): cbz cntin, L(not_found) ldrb cur_byte, [srcin], #1 sub cntin, cntin, #1 cmp cur_byte, chrin b.ne L(byte_loop) sub srcin, srcin, #1 ret L(found_word): CPU_LE( rev tmp, tmp) clz tmp, tmp sub tmp, tmp, #64 add result, srcin, tmp, asr #3 ret L(not_found): mov result, #0 ret SYM_FUNC_END(__pi_memchr) SYM_FUNC_ALIAS_WEAK(memchr, __pi_memchr) EXPORT_SYMBOL_NOKASAN(memchr)
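The word loop above replicates the target byte across a register with a single multiply (c * REP8_01), XORs it into each loaded word so matching bytes become zero, and then applies the zero-byte test. A little-endian C model of the technique (bookkeeping simplified; not the kernel code):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define REP8_01 0x0101010101010101ULL
#define REP8_80 0x8080808080808080ULL

static const void *memchr_word(const void *s, int c, size_t n)
{
    const unsigned char *p = s;
    uint64_t repchr = (uint64_t)(unsigned char)c * REP8_01;

    while (n >= 8) {
        uint64_t w;
        memcpy(&w, p, 8);
        w ^= repchr;                            /* matching bytes -> 0 */
        uint64_t hit = (w - REP8_01) & ~w & REP8_80;
        if (hit)                                /* lowest set bit = first match */
            return p + (__builtin_ctzll(hit) >> 3);
        p += 8;
        n -= 8;
    }
    while (n--) {                               /* byte tail, as L(byte_loop) */
        if (*p == (unsigned char)c)
            return p;
        p++;
    }
    return NULL;
}

int main(void)
{
    const char buf[] = "find the needle";
    printf("%s\n", (const char *)memchr_word(buf, 'n', sizeof(buf) - 1));
    return 0;                                   /* prints "nd the needle" */
}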
aixcc-public/challenge-001-exemplar-source
2,951
arch/arm64/lib/memcmp.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013-2021, Arm Limited. * * Adapted from the original at: * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/memcmp.S */ #include <linux/linkage.h> #include <asm/assembler.h> /* Assumptions: * * ARMv8-a, AArch64, unaligned accesses. */ #define L(label) .L ## label /* Parameters and result. */ #define src1 x0 #define src2 x1 #define limit x2 #define result w0 /* Internal variables. */ #define data1 x3 #define data1w w3 #define data1h x4 #define data2 x5 #define data2w w5 #define data2h x6 #define tmp1 x7 #define tmp2 x8 SYM_FUNC_START(__pi_memcmp) subs limit, limit, 8 b.lo L(less8) ldr data1, [src1], 8 ldr data2, [src2], 8 cmp data1, data2 b.ne L(return) subs limit, limit, 8 b.gt L(more16) ldr data1, [src1, limit] ldr data2, [src2, limit] b L(return) L(more16): ldr data1, [src1], 8 ldr data2, [src2], 8 cmp data1, data2 b.ne L(return) /* Jump directly to comparing the last 16 bytes for 32 byte (or less) strings. */ subs limit, limit, 16 b.ls L(last_bytes) /* We overlap loads between 0-32 bytes at either side of SRC1 when we try to align, so limit it only to strings larger than 128 bytes. */ cmp limit, 96 b.ls L(loop16) /* Align src1 and adjust src2 with bytes not yet done. */ and tmp1, src1, 15 add limit, limit, tmp1 sub src1, src1, tmp1 sub src2, src2, tmp1 /* Loop performing 16 bytes per iteration using aligned src1. Limit is pre-decremented by 16 and must be larger than zero. Exit if <= 16 bytes left to do or if the data is not equal. */ .p2align 4 L(loop16): ldp data1, data1h, [src1], 16 ldp data2, data2h, [src2], 16 subs limit, limit, 16 ccmp data1, data2, 0, hi ccmp data1h, data2h, 0, eq b.eq L(loop16) cmp data1, data2 b.ne L(return) mov data1, data1h mov data2, data2h cmp data1, data2 b.ne L(return) /* Compare last 1-16 bytes using unaligned access. */ L(last_bytes): add src1, src1, limit add src2, src2, limit ldp data1, data1h, [src1] ldp data2, data2h, [src2] cmp data1, data2 b.ne L(return) mov data1, data1h mov data2, data2h cmp data1, data2 /* Compare data bytes and set return value to 0, -1 or 1. */ L(return): #ifndef __AARCH64EB__ rev data1, data1 rev data2, data2 #endif cmp data1, data2 L(ret_eq): cset result, ne cneg result, result, lo ret .p2align 4 /* Compare up to 8 bytes. Limit is [-8..-1]. */ L(less8): adds limit, limit, 4 b.lo L(less4) ldr data1w, [src1], 4 ldr data2w, [src2], 4 cmp data1w, data2w b.ne L(return) sub limit, limit, 4 L(less4): adds limit, limit, 4 b.eq L(ret_eq) L(byte_loop): ldrb data1w, [src1], 1 ldrb data2w, [src2], 1 subs limit, limit, 1 ccmp data1w, data2w, 0, ne /* NZCV = 0b0000. */ b.eq L(byte_loop) sub result, data1w, data2w ret SYM_FUNC_END(__pi_memcmp) SYM_FUNC_ALIAS_WEAK(memcmp, __pi_memcmp) EXPORT_SYMBOL_NOKASAN(memcmp)
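L(return) above turns the last pair of 64-bit words into an ordering with one trick: byte-swap both (on little-endian) so the first differing byte in memory becomes the most significant, then a single unsigned comparison yields the sign. A C model of that step (helper name invented here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int cmp_words_le(uint64_t a, uint64_t b)
{
    a = __builtin_bswap64(a);       /* first memory byte -> MSB */
    b = __builtin_bswap64(b);
    if (a == b)
        return 0;                   /* "cset result, ne" -> 0 */
    return a < b ? -1 : 1;          /* "cneg result, result, lo" */
}

int main(void)
{
    uint64_t a, b;
    memcpy(&a, "abcdefgh", 8);
    memcpy(&b, "abcdefgZ", 8);
    printf("%d\n", cmp_words_le(a, b));   /* 1: 'h' > 'Z' */
    return 0;
}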
aixcc-public/challenge-001-exemplar-source
6,353
arch/arm64/lib/strlen.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013-2021, Arm Limited. * * Adapted from the original at: * https://github.com/ARM-software/optimized-routines/blob/98e4d6a5c13c8e54/string/aarch64/strlen.S */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/mte-def.h> /* Assumptions: * * ARMv8-a, AArch64, unaligned accesses, min page size 4k. */ #define L(label) .L ## label /* Arguments and results. */ #define srcin x0 #define len x0 /* Locals and temporaries. */ #define src x1 #define data1 x2 #define data2 x3 #define has_nul1 x4 #define has_nul2 x5 #define tmp1 x4 #define tmp2 x5 #define tmp3 x6 #define tmp4 x7 #define zeroones x8 /* NUL detection works on the principle that (X - 1) & (~X) & 0x80 (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and can be done in parallel across the entire word. A faster check (X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives false hits for characters 129..255. */ #define REP8_01 0x0101010101010101 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 /* * When KASAN_HW_TAGS is in use, memory is checked at MTE_GRANULE_SIZE * (16-byte) granularity, and we must ensure that no access straddles this * alignment boundary. */ #ifdef CONFIG_KASAN_HW_TAGS #define MIN_PAGE_SIZE MTE_GRANULE_SIZE #else #define MIN_PAGE_SIZE 4096 #endif /* Since strings are short on average, we check the first 16 bytes of the string for a NUL character. In order to do an unaligned ldp safely we have to do a page cross check first. If there is a NUL byte we calculate the length from the 2 8-byte words using conditional select to reduce branch mispredictions (it is unlikely strlen will be repeatedly called on strings with the same length). If the string is longer than 16 bytes, we align src so we don't need further page cross checks, and process 32 bytes per iteration using the fast NUL check. If we encounter non-ASCII characters, fall back to a second loop using the full NUL check. If the page cross check fails, we read 16 bytes from an aligned address, remove any characters before the string, and continue in the main loop using aligned loads. Since strings crossing a page in the first 16 bytes are rare (probability of 16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized. AArch64 systems have a minimum page size of 4k. We don't bother checking for larger page sizes - the cost of setting up the correct page size is just not worth the extra gain from a small reduction in the cases taking the slow path. Note that we only care about whether the first fetch, which may be misaligned, crosses a page boundary. */ SYM_FUNC_START(__pi_strlen) and tmp1, srcin, MIN_PAGE_SIZE - 1 mov zeroones, REP8_01 cmp tmp1, MIN_PAGE_SIZE - 16 b.gt L(page_cross) ldp data1, data2, [srcin] #ifdef __AARCH64EB__ /* For big-endian, carry propagation (if the final byte in the string is 0x01) means we cannot use has_nul1/2 directly. Since we expect strings to be small and early-exit, byte-swap the data now so has_nul1/2 will be correct. */ rev data1, data1 rev data2, data2 #endif sub tmp1, data1, zeroones orr tmp2, data1, REP8_7f sub tmp3, data2, zeroones orr tmp4, data2, REP8_7f bics has_nul1, tmp1, tmp2 bic has_nul2, tmp3, tmp4 ccmp has_nul2, 0, 0, eq b.eq L(main_loop_entry) /* Enter with C = has_nul1 == 0. */ csel has_nul1, has_nul1, has_nul2, cc mov len, 8 rev has_nul1, has_nul1 clz tmp1, has_nul1 csel len, xzr, len, cc add len, len, tmp1, lsr 3 ret /* The inner loop processes 32 bytes per iteration and uses the fast NUL check. 
If we encounter non-ASCII characters, use a second loop with the accurate NUL check. */ .p2align 4 L(main_loop_entry): bic src, srcin, 15 sub src, src, 16 L(main_loop): ldp data1, data2, [src, 32]! L(page_cross_entry): sub tmp1, data1, zeroones sub tmp3, data2, zeroones orr tmp2, tmp1, tmp3 tst tmp2, zeroones, lsl 7 b.ne 1f ldp data1, data2, [src, 16] sub tmp1, data1, zeroones sub tmp3, data2, zeroones orr tmp2, tmp1, tmp3 tst tmp2, zeroones, lsl 7 b.eq L(main_loop) add src, src, 16 1: /* The fast check failed, so do the slower, accurate NUL check. */ orr tmp2, data1, REP8_7f orr tmp4, data2, REP8_7f bics has_nul1, tmp1, tmp2 bic has_nul2, tmp3, tmp4 ccmp has_nul2, 0, 0, eq b.eq L(nonascii_loop) /* Enter with C = has_nul1 == 0. */ L(tail): #ifdef __AARCH64EB__ /* For big-endian, carry propagation (if the final byte in the string is 0x01) means we cannot use has_nul1/2 directly. The easiest way to get the correct byte is to byte-swap the data and calculate the syndrome a second time. */ csel data1, data1, data2, cc rev data1, data1 sub tmp1, data1, zeroones orr tmp2, data1, REP8_7f bic has_nul1, tmp1, tmp2 #else csel has_nul1, has_nul1, has_nul2, cc #endif sub len, src, srcin rev has_nul1, has_nul1 add tmp2, len, 8 clz tmp1, has_nul1 csel len, len, tmp2, cc add len, len, tmp1, lsr 3 ret L(nonascii_loop): ldp data1, data2, [src, 16]! sub tmp1, data1, zeroones orr tmp2, data1, REP8_7f sub tmp3, data2, zeroones orr tmp4, data2, REP8_7f bics has_nul1, tmp1, tmp2 bic has_nul2, tmp3, tmp4 ccmp has_nul2, 0, 0, eq b.ne L(tail) ldp data1, data2, [src, 16]! sub tmp1, data1, zeroones orr tmp2, data1, REP8_7f sub tmp3, data2, zeroones orr tmp4, data2, REP8_7f bics has_nul1, tmp1, tmp2 bic has_nul2, tmp3, tmp4 ccmp has_nul2, 0, 0, eq b.eq L(nonascii_loop) b L(tail) /* Load 16 bytes from [srcin & ~15] and force the bytes that precede srcin to 0x7f, so we ignore any NUL bytes before the string. Then continue in the aligned loop. */ L(page_cross): bic src, srcin, 15 ldp data1, data2, [src] lsl tmp1, srcin, 3 mov tmp4, -1 #ifdef __AARCH64EB__ /* Big-endian. Early bytes are at MSB. */ lsr tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */ #else /* Little-endian. Early bytes are at LSB. */ lsl tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */ #endif orr tmp1, tmp1, REP8_80 orn data1, data1, tmp1 orn tmp2, data2, tmp1 tst srcin, 8 csel data1, data1, tmp4, eq csel data2, data2, tmp2, eq b L(page_cross_entry) SYM_FUNC_END(__pi_strlen) SYM_FUNC_ALIAS_WEAK(strlen, __pi_strlen) EXPORT_SYMBOL_NOKASAN(strlen)
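The entry sequence above guards the first unaligned 16-byte load with a page-cross test: if the source offset within a page exceeds MIN_PAGE_SIZE - 16, the ldp could touch the next, possibly unmapped, page. A C model of that predicate (non-MTE case assumed; helper name invented here):

#include <stdint.h>
#include <stdio.h>

#define MIN_PAGE_SIZE 4096    /* the non-KASAN_HW_TAGS case above */

/* Would an unaligned 16-byte load at addr touch the following page? */
static int load16_may_cross_page(uintptr_t addr)
{
    return (addr & (MIN_PAGE_SIZE - 1)) > MIN_PAGE_SIZE - 16;
}

int main(void)
{
    printf("%d\n", load16_may_cross_page(0x1000));  /* 0: start of a page */
    printf("%d\n", load16_may_cross_page(0x1ff8));  /* 1: 8 bytes from the end */
    return 0;
}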
aixcc-public/challenge-001-exemplar-source
4,252
arch/arm64/lib/strnlen.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro, * and can be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ */ #include <linux/linkage.h> #include <asm/assembler.h> /* * Determine the length of a fixed-size string * * Parameters: * x0 - const string pointer * x1 - maximal string length * Returns: * x0 - the length of the string, at most the given limit */ /* Arguments and results. */ srcin .req x0 len .req x0 limit .req x1 /* Locals and temporaries. */ src .req x2 data1 .req x3 data2 .req x4 data2a .req x5 has_nul1 .req x6 has_nul2 .req x7 tmp1 .req x8 tmp2 .req x9 tmp3 .req x10 tmp4 .req x11 zeroones .req x12 pos .req x13 limit_wd .req x14 #define REP8_01 0x0101010101010101 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 SYM_FUNC_START(__pi_strnlen) cbz limit, .Lhit_limit mov zeroones, #REP8_01 bic src, srcin, #15 ands tmp1, srcin, #15 b.ne .Lmisaligned /* Calculate the number of full and partial words -1. */ sub limit_wd, limit, #1 /* Limit != 0, so no underflow. */ lsr limit_wd, limit_wd, #4 /* Convert to Qwords. */ /* * NUL detection works on the principle that (X - 1) & (~X) & 0x80 * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and * can be done in parallel across the entire word. */ /* * The inner loop deals with two Dwords at a time. This has a * slightly higher start-up cost, but we should win quite quickly, * especially on cores with a high number of issue slots per * cycle, as we get much better parallelism out of the operations. */ .Lloop: ldp data1, data2, [src], #16 .Lrealigned: sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f sub tmp3, data2, zeroones orr tmp4, data2, #REP8_7f bic has_nul1, tmp1, tmp2 bic has_nul2, tmp3, tmp4 subs limit_wd, limit_wd, #1 orr tmp1, has_nul1, has_nul2 ccmp tmp1, #0, #0, pl /* NZCV = 0000 */ b.eq .Lloop cbz tmp1, .Lhit_limit /* No null in final Qword. */ /* * We know there's a null in the final Qword. The easiest thing * to do now is work out the length of the string and return * MIN (len, limit). */ sub len, src, srcin cbz has_nul1, .Lnul_in_data2 CPU_BE( mov data2, data1 ) /* prepare data to recalculate the syndrome */ sub len, len, #8 mov has_nul2, has_nul1 .Lnul_in_data2: /* * For big-endian, carry propagation (if the final byte in the * string is 0x01) means we cannot use has_nul directly. The * easiest way to get the correct byte is to byte-swap the data * and calculate the syndrome a second time. */ CPU_BE( rev data2, data2 ) CPU_BE( sub tmp1, data2, zeroones ) CPU_BE( orr tmp2, data2, #REP8_7f ) CPU_BE( bic has_nul2, tmp1, tmp2 ) sub len, len, #8 rev has_nul2, has_nul2 clz pos, has_nul2 add len, len, pos, lsr #3 /* Bits to bytes. */ cmp len, limit csel len, len, limit, ls /* Return the lower value. */ ret .Lmisaligned: /* * Deal with a partial first word. * We're doing two things in parallel here: * 1) Calculate the number of words (but avoiding overflow if * limit is near ULONG_MAX) - to do this we need to work out * limit + tmp1 - 1 as a 65-bit value before shifting it; * 2) Load and mask the initial data words - we force the bytes * before the ones we are interested in to 0xff - this ensures * early bytes will not hit any zero detection. 
*/ ldp data1, data2, [src], #16 sub limit_wd, limit, #1 and tmp3, limit_wd, #15 lsr limit_wd, limit_wd, #4 add tmp3, tmp3, tmp1 add limit_wd, limit_wd, tmp3, lsr #4 neg tmp4, tmp1 lsl tmp4, tmp4, #3 /* Bytes beyond alignment -> bits. */ mov tmp2, #~0 /* Big-endian. Early bytes are at MSB. */ CPU_BE( lsl tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */ /* Little-endian. Early bytes are at LSB. */ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */ cmp tmp1, #8 orr data1, data1, tmp2 orr data2a, data2, tmp2 csinv data1, data1, xzr, le csel data2, data2, data2a, le b .Lrealigned .Lhit_limit: mov len, limit ret SYM_FUNC_END(__pi_strnlen) SYM_FUNC_ALIAS_WEAK(strnlen, __pi_strnlen) EXPORT_SYMBOL_NOKASAN(strnlen)
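Whichever path the code above takes, the final result is clamped with csel to MIN(len, limit). A plain C reference model of that contract (illustrative only; byte-at-a-time rather than word-at-a-time):

#include <stddef.h>
#include <stdio.h>

/* Reference model for the clamp at the end of __pi_strnlen. */
static size_t strnlen_ref(const char *s, size_t limit)
{
    size_t len = 0;
    while (len < limit && s[len])
        len++;
    return len;                 /* never exceeds limit */
}

int main(void)
{
    printf("%zu\n", strnlen_ref("hello", 3));   /* 3: hit the limit */
    printf("%zu\n", strnlen_ref("hi", 8));      /* 2: hit the NUL */
    return 0;
}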
aixcc-public/challenge-001-exemplar-source
4,345
arch/arm64/lib/strcmp.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2012-2022, Arm Limited. * * Adapted from the original at: * https://github.com/ARM-software/optimized-routines/blob/189dfefe37d54c5b/string/aarch64/strcmp.S */ #include <linux/linkage.h> #include <asm/assembler.h> /* Assumptions: * * ARMv8-a, AArch64. * MTE compatible. */ #define L(label) .L ## label #define REP8_01 0x0101010101010101 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define src1 x0 #define src2 x1 #define result x0 #define data1 x2 #define data1w w2 #define data2 x3 #define data2w w3 #define has_nul x4 #define diff x5 #define off1 x5 #define syndrome x6 #define tmp x6 #define data3 x7 #define zeroones x8 #define shift x9 #define off2 x10 /* On big-endian early bytes are at MSB and on little-endian LSB. LS_FW means shifting towards early bytes. */ #ifdef __AARCH64EB__ # define LS_FW lsl #else # define LS_FW lsr #endif /* NUL detection works on the principle that (X - 1) & (~X) & 0x80 (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and can be done in parallel across the entire word. Since carry propagation makes 0x1 bytes before a NUL byte appear NUL too in big-endian, byte-reverse the data before the NUL check. */ SYM_FUNC_START(__pi_strcmp) sub off2, src2, src1 mov zeroones, REP8_01 and tmp, src1, 7 tst off2, 7 b.ne L(misaligned8) cbnz tmp, L(mutual_align) .p2align 4 L(loop_aligned): ldr data2, [src1, off2] ldr data1, [src1], 8 L(start_realigned): #ifdef __AARCH64EB__ rev tmp, data1 sub has_nul, tmp, zeroones orr tmp, tmp, REP8_7f #else sub has_nul, data1, zeroones orr tmp, data1, REP8_7f #endif bics has_nul, has_nul, tmp /* Non-zero if NUL terminator. */ ccmp data1, data2, 0, eq b.eq L(loop_aligned) #ifdef __AARCH64EB__ rev has_nul, has_nul #endif eor diff, data1, data2 orr syndrome, diff, has_nul L(end): #ifndef __AARCH64EB__ rev syndrome, syndrome rev data1, data1 rev data2, data2 #endif clz shift, syndrome /* The most-significant-non-zero bit of the syndrome marks either the first bit that is different, or the top bit of the first zero byte. Shifting left now will bring the critical information into the top bits. */ lsl data1, data1, shift lsl data2, data2, shift /* But we need to zero-extend (char is unsigned) the value and then perform a signed 32-bit subtraction. */ lsr data1, data1, 56 sub result, data1, data2, lsr 56 ret .p2align 4 L(mutual_align): /* Sources are mutually aligned, but are not currently at an alignment boundary. Round down the addresses and then mask off the bytes that precede the start point. */ bic src1, src1, 7 ldr data2, [src1, off2] ldr data1, [src1], 8 neg shift, src2, lsl 3 /* Bits to alignment -64. */ mov tmp, -1 LS_FW tmp, tmp, shift orr data1, data1, tmp orr data2, data2, tmp b L(start_realigned) L(misaligned8): /* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always checking to make sure that we don't access beyond the end of SRC2. */ cbz tmp, L(src1_aligned) L(do_misaligned): ldrb data1w, [src1], 1 ldrb data2w, [src2], 1 cmp data1w, 0 ccmp data1w, data2w, 0, ne /* NZCV = 0b0000. 
*/ b.ne L(done) tst src1, 7 b.ne L(do_misaligned) L(src1_aligned): neg shift, src2, lsl 3 bic src2, src2, 7 ldr data3, [src2], 8 #ifdef __AARCH64EB__ rev data3, data3 #endif lsr tmp, zeroones, shift orr data3, data3, tmp sub has_nul, data3, zeroones orr tmp, data3, REP8_7f bics has_nul, has_nul, tmp b.ne L(tail) sub off1, src2, src1 .p2align 4 L(loop_unaligned): ldr data3, [src1, off1] ldr data2, [src1, off2] #ifdef __AARCH64EB__ rev data3, data3 #endif sub has_nul, data3, zeroones orr tmp, data3, REP8_7f ldr data1, [src1], 8 bics has_nul, has_nul, tmp ccmp data1, data2, 0, eq b.eq L(loop_unaligned) lsl tmp, has_nul, shift #ifdef __AARCH64EB__ rev tmp, tmp #endif eor diff, data1, data2 orr syndrome, diff, tmp cbnz syndrome, L(end) L(tail): ldr data1, [src1] neg shift, shift lsr data2, data3, shift lsr has_nul, has_nul, shift #ifdef __AARCH64EB__ rev data2, data2 rev has_nul, has_nul #endif eor diff, data1, data2 orr syndrome, diff, has_nul b L(end) L(done): sub result, data1, data2 ret SYM_FUNC_END(__pi_strcmp) SYM_FUNC_ALIAS_WEAK(strcmp, __pi_strcmp) EXPORT_SYMBOL_NOKASAN(strcmp)
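L(mutual_align) above rounds both sources down to an 8-byte boundary and then ORs 0xff into the bytes that precede the real start, so those lanes can neither miscompare nor read as NUL. A little-endian C model (helper name invented here; misalignment must be 1..7, since the aligned case never reaches this path):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t mask_leading_bytes(uint64_t word, unsigned misalignment)
{
    /* Low "misalignment" bytes are pre-start garbage on little-endian. */
    uint64_t mask = ~0ULL >> (64 - 8 * misalignment);
    return word | mask;
}

int main(void)
{
    uint64_t w;
    memcpy(&w, "XXXhello", 8);      /* 3 garbage bytes before the string */
    printf("%#llx\n", (unsigned long long)mask_leading_bytes(w, 3));
    return 0;                       /* low 3 bytes forced to 0xff */
}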
aixcc-public/challenge-001-exemplar-source
5,079
arch/arm64/mm/cache.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Cache maintenance * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2012 ARM Ltd. */ #include <linux/errno.h> #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/cpufeature.h> #include <asm/alternative.h> #include <asm/asm-uaccess.h> /* * caches_clean_inval_pou_macro(start,end) [fixup] * * Ensure that the I and D caches are coherent within specified region. * This is typically used when code has been written to a memory region, * and will be executed. * * - start - virtual start address of region * - end - virtual end address of region * - fixup - optional label to branch to on user fault */ .macro caches_clean_inval_pou_macro, fixup alternative_if ARM64_HAS_CACHE_IDC dsb ishst b .Ldc_skip_\@ alternative_else_nop_endif mov x2, x0 mov x3, x1 dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup .Ldc_skip_\@: alternative_if ARM64_HAS_CACHE_DIC isb b .Lic_skip_\@ alternative_else_nop_endif invalidate_icache_by_line x0, x1, x2, x3, \fixup .Lic_skip_\@: .endm /* * caches_clean_inval_pou(start,end) * * Ensure that the I and D caches are coherent within specified region. * This is typically used when code has been written to a memory region, * and will be executed. * * - start - virtual start address of region * - end - virtual end address of region */ SYM_FUNC_START(caches_clean_inval_pou) caches_clean_inval_pou_macro ret SYM_FUNC_END(caches_clean_inval_pou) /* * caches_clean_inval_user_pou(start,end) * * Ensure that the I and D caches are coherent within specified region. * This is typically used when code has been written to a memory region, * and will be executed. * * - start - virtual start address of region * - end - virtual end address of region */ SYM_FUNC_START(caches_clean_inval_user_pou) uaccess_ttbr0_enable x2, x3, x4 caches_clean_inval_pou_macro 2f mov x0, xzr 1: uaccess_ttbr0_disable x1, x2 ret 2: mov x0, #-EFAULT b 1b SYM_FUNC_END(caches_clean_inval_user_pou) /* * icache_inval_pou(start,end) * * Ensure that the I cache is invalid within specified region. * * - start - virtual start address of region * - end - virtual end address of region */ SYM_FUNC_START(icache_inval_pou) alternative_if ARM64_HAS_CACHE_DIC isb ret alternative_else_nop_endif invalidate_icache_by_line x0, x1, x2, x3 ret SYM_FUNC_END(icache_inval_pou) /* * dcache_clean_inval_poc(start, end) * * Ensure that any D-cache lines for the interval [start, end) * are cleaned and invalidated to the PoC. * * - start - virtual start address of region * - end - virtual end address of region */ SYM_FUNC_START(__pi_dcache_clean_inval_poc) dcache_by_line_op civac, sy, x0, x1, x2, x3 ret SYM_FUNC_END(__pi_dcache_clean_inval_poc) SYM_FUNC_ALIAS(dcache_clean_inval_poc, __pi_dcache_clean_inval_poc) /* * dcache_clean_pou(start, end) * * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoU. * * - start - virtual start address of region * - end - virtual end address of region */ SYM_FUNC_START(dcache_clean_pou) alternative_if ARM64_HAS_CACHE_IDC dsb ishst ret alternative_else_nop_endif dcache_by_line_op cvau, ish, x0, x1, x2, x3 ret SYM_FUNC_END(dcache_clean_pou) /* * dcache_inval_poc(start, end) * * Ensure that any D-cache lines for the interval [start, end) * are invalidated. Any partial lines at the ends of the interval are * also cleaned to PoC to prevent data loss. 
* * - start - kernel start address of region * - end - kernel end address of region */ SYM_FUNC_START(__pi_dcache_inval_poc) dcache_line_size x2, x3 sub x3, x2, #1 tst x1, x3 // end cache line aligned? bic x1, x1, x3 b.eq 1f dc civac, x1 // clean & invalidate D / U line 1: tst x0, x3 // start cache line aligned? bic x0, x0, x3 b.eq 2f dc civac, x0 // clean & invalidate D / U line b 3f 2: dc ivac, x0 // invalidate D / U line 3: add x0, x0, x2 cmp x0, x1 b.lo 2b dsb sy ret SYM_FUNC_END(__pi_dcache_inval_poc) SYM_FUNC_ALIAS(dcache_inval_poc, __pi_dcache_inval_poc) /* * dcache_clean_poc(start, end) * * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoC. * * - start - virtual start address of region * - end - virtual end address of region */ SYM_FUNC_START(__pi_dcache_clean_poc) dcache_by_line_op cvac, sy, x0, x1, x2, x3 ret SYM_FUNC_END(__pi_dcache_clean_poc) SYM_FUNC_ALIAS(dcache_clean_poc, __pi_dcache_clean_poc) /* * dcache_clean_pop(start, end) * * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoP. * * - start - virtual start address of region * - end - virtual end address of region */ SYM_FUNC_START(__pi_dcache_clean_pop) alternative_if_not ARM64_HAS_DCPOP b dcache_clean_poc alternative_else_nop_endif dcache_by_line_op cvap, sy, x0, x1, x2, x3 ret SYM_FUNC_END(__pi_dcache_clean_pop) SYM_FUNC_ALIAS(dcache_clean_pop, __pi_dcache_clean_pop)
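The by-line loops above obtain the maintenance line size from CTR_EL0 via the dcache_line_size macro: DminLine (bits 19:16) is log2 of the smallest D-cache line in 4-byte words. A C sketch of that decode (assumption: an AArch64 Linux host, which exposes or emulates EL0 reads of CTR_EL0):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t ctr;
    __asm__ volatile("mrs %0, ctr_el0" : "=r"(ctr));

    unsigned dminline = (ctr >> 16) & 0xf;    /* log2(words) */
    unsigned long line = 4ul << dminline;     /* 4 bytes per word */
    printf("D-cache maintenance line: %lu bytes\n", line);
    return 0;
}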
aixcc-public/challenge-001-exemplar-source
11,084
arch/arm64/mm/proc.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Based on arch/arm/mm/proc.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2012 ARM Ltd. * Author: Catalin Marinas <catalin.marinas@arm.com> */ #include <linux/init.h> #include <linux/linkage.h> #include <linux/pgtable.h> #include <linux/cfi_types.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/asm_pointer_auth.h> #include <asm/hwcap.h> #include <asm/kernel-pgtable.h> #include <asm/pgtable-hwdef.h> #include <asm/cpufeature.h> #include <asm/alternative.h> #include <asm/smp.h> #include <asm/sysreg.h> #ifdef CONFIG_ARM64_64K_PAGES #define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K #elif defined(CONFIG_ARM64_16K_PAGES) #define TCR_TG_FLAGS TCR_TG0_16K | TCR_TG1_16K #else /* CONFIG_ARM64_4K_PAGES */ #define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K #endif #ifdef CONFIG_RANDOMIZE_BASE #define TCR_KASLR_FLAGS TCR_NFD1 #else #define TCR_KASLR_FLAGS 0 #endif #define TCR_SMP_FLAGS TCR_SHARED /* PTWs cacheable, inner/outer WBWA */ #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA #ifdef CONFIG_KASAN_SW_TAGS #define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1 #else #define TCR_KASAN_SW_FLAGS 0 #endif #ifdef CONFIG_KASAN_HW_TAGS #define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1 #elif defined(CONFIG_ARM64_MTE) /* * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on * TBI being enabled at EL1. */ #define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1 #else #define TCR_MTE_FLAGS 0 #endif /* * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and * changed during mte_cpu_setup to Normal Tagged if the system supports MTE. */ #define MAIR_EL1_SET \ (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED)) #ifdef CONFIG_CPU_PM /** * cpu_do_suspend - save CPU registers context * * x0: virtual address of context pointer * * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>. */ SYM_FUNC_START(cpu_do_suspend) mrs x2, tpidr_el0 mrs x3, tpidrro_el0 mrs x4, contextidr_el1 mrs x5, osdlr_el1 mrs x6, cpacr_el1 mrs x7, tcr_el1 mrs x8, vbar_el1 mrs x9, mdscr_el1 mrs x10, oslsr_el1 mrs x11, sctlr_el1 get_this_cpu_offset x12 mrs x13, sp_el0 stp x2, x3, [x0] stp x4, x5, [x0, #16] stp x6, x7, [x0, #32] stp x8, x9, [x0, #48] stp x10, x11, [x0, #64] stp x12, x13, [x0, #80] /* * Save x18 as it may be used as a platform register, e.g. by shadow * call stack. */ str x18, [x0, #96] ret SYM_FUNC_END(cpu_do_suspend) /** * cpu_do_resume - restore CPU register context * * x0: Address of context pointer */ .pushsection ".idmap.text", "awx" SYM_FUNC_START(cpu_do_resume) ldp x2, x3, [x0] ldp x4, x5, [x0, #16] ldp x6, x8, [x0, #32] ldp x9, x10, [x0, #48] ldp x11, x12, [x0, #64] ldp x13, x14, [x0, #80] /* * Restore x18, as it may be used as a platform register, and clear * the buffer to minimize the risk of exposure when used for shadow * call stack. */ ldr x18, [x0, #96] str xzr, [x0, #96] msr tpidr_el0, x2 msr tpidrro_el0, x3 msr contextidr_el1, x4 msr cpacr_el1, x6 /* Don't change t0sz here, mask those bits when restoring */ mrs x7, tcr_el1 bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH msr tcr_el1, x8 msr vbar_el1, x9 /* * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug * exception. 
Mask them until local_daif_restore() in cpu_suspend() * resets them. */ disable_daif msr mdscr_el1, x10 msr sctlr_el1, x12 set_this_cpu_offset x13 msr sp_el0, x14 /* * Restore oslsr_el1 by writing oslar_el1 */ msr osdlr_el1, x5 ubfx x11, x11, #1, #1 msr oslar_el1, x11 reset_pmuserenr_el0 x0 // Disable PMU access from EL0 reset_amuserenr_el0 x0 // Disable AMU access from EL0 alternative_if ARM64_HAS_RAS_EXTN msr_s SYS_DISR_EL1, xzr alternative_else_nop_endif ptrauth_keys_install_kernel_nosync x14, x1, x2, x3 isb ret SYM_FUNC_END(cpu_do_resume) .popsection #endif .pushsection ".idmap.text", "awx" .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 adrp \tmp1, reserved_pg_dir phys_to_ttbr \tmp2, \tmp1 offset_ttbr1 \tmp2, \tmp1 msr ttbr1_el1, \tmp2 isb tlbi vmalle1 dsb nsh isb .endm /* * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1) * * This is the low-level counterpart to cpu_replace_ttbr1, and should not be * called by anything else. It can only be executed from a TTBR0 mapping. */ SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1) save_and_disable_daif flags=x2 __idmap_cpu_set_reserved_ttbr1 x1, x3 offset_ttbr1 x0, x3 msr ttbr1_el1, x0 isb restore_daif x2 ret SYM_FUNC_END(idmap_cpu_replace_ttbr1) .popsection #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 #define KPTI_NG_PTE_FLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS) .pushsection ".idmap.text", "awx" .macro kpti_mk_tbl_ng, type, num_entries add end_\type\()p, cur_\type\()p, #\num_entries * 8 .Ldo_\type: ldr \type, [cur_\type\()p] // Load the entry tbz \type, #0, .Lnext_\type // Skip invalid and tbnz \type, #11, .Lnext_\type // non-global entries orr \type, \type, #PTE_NG // Same bit for blocks and pages str \type, [cur_\type\()p] // Update the entry .ifnc \type, pte tbnz \type, #1, .Lderef_\type .endif .Lnext_\type: add cur_\type\()p, cur_\type\()p, #8 cmp cur_\type\()p, end_\type\()p b.ne .Ldo_\type .endm /* * Dereference the current table entry and map it into the temporary * fixmap slot associated with the current level. */ .macro kpti_map_pgtbl, type, level str xzr, [temp_pte, #8 * (\level + 1)] // break before make dsb nshst add pte, temp_pte, #PAGE_SIZE * (\level + 1) lsr pte, pte, #12 tlbi vaae1, pte dsb nsh isb phys_to_pte pte, cur_\type\()p add cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1) orr pte, pte, pte_flags str pte, [temp_pte, #8 * (\level + 1)] dsb nshst .endm /* * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd, * unsigned long temp_pte_va) * * Called exactly once from stop_machine context by each CPU found during boot. */ .pushsection ".data", "aw", %progbits SYM_DATA(__idmap_kpti_flag, .long 1) .popsection SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings) cpu .req w0 temp_pte .req x0 num_cpus .req w1 pte_flags .req x1 temp_pgd_phys .req x2 swapper_ttb .req x3 flag_ptr .req x4 cur_pgdp .req x5 end_pgdp .req x6 pgd .req x7 cur_pudp .req x8 end_pudp .req x9 cur_pmdp .req x11 end_pmdp .req x12 cur_ptep .req x14 end_ptep .req x15 pte .req x16 valid .req x17 mov x5, x3 // preserve temp_pte arg mrs swapper_ttb, ttbr1_el1 adr_l flag_ptr, __idmap_kpti_flag cbnz cpu, __idmap_kpti_secondary /* We're the boot CPU. Wait for the others to catch up */ sevl 1: wfe ldaxr w17, [flag_ptr] eor w17, w17, num_cpus cbnz w17, 1b /* Switch to the temporary page tables on this CPU only */ __idmap_cpu_set_reserved_ttbr1 x8, x9 offset_ttbr1 temp_pgd_phys, x8 msr ttbr1_el1, temp_pgd_phys isb mov temp_pte, x5 mov pte_flags, #KPTI_NG_PTE_FLAGS /* Everybody is enjoying the idmap, so we can rewrite swapper. 
*/ /* PGD */ adrp cur_pgdp, swapper_pg_dir kpti_map_pgtbl pgd, 0 kpti_mk_tbl_ng pgd, PTRS_PER_PGD /* Ensure all the updated entries are visible to secondary CPUs */ dsb ishst /* We're done: fire up swapper_pg_dir again */ __idmap_cpu_set_reserved_ttbr1 x8, x9 msr ttbr1_el1, swapper_ttb isb /* Set the flag to zero to indicate that we're all done */ str wzr, [flag_ptr] ret .Lderef_pgd: /* PUD */ .if CONFIG_PGTABLE_LEVELS > 3 pud .req x10 pte_to_phys cur_pudp, pgd kpti_map_pgtbl pud, 1 kpti_mk_tbl_ng pud, PTRS_PER_PUD b .Lnext_pgd .else /* CONFIG_PGTABLE_LEVELS <= 3 */ pud .req pgd .set .Lnext_pud, .Lnext_pgd .endif .Lderef_pud: /* PMD */ .if CONFIG_PGTABLE_LEVELS > 2 pmd .req x13 pte_to_phys cur_pmdp, pud kpti_map_pgtbl pmd, 2 kpti_mk_tbl_ng pmd, PTRS_PER_PMD b .Lnext_pud .else /* CONFIG_PGTABLE_LEVELS <= 2 */ pmd .req pgd .set .Lnext_pmd, .Lnext_pgd .endif .Lderef_pmd: /* PTE */ pte_to_phys cur_ptep, pmd kpti_map_pgtbl pte, 3 kpti_mk_tbl_ng pte, PTRS_PER_PTE b .Lnext_pmd .unreq cpu .unreq temp_pte .unreq num_cpus .unreq pte_flags .unreq temp_pgd_phys .unreq cur_pgdp .unreq end_pgdp .unreq pgd .unreq cur_pudp .unreq end_pudp .unreq pud .unreq cur_pmdp .unreq end_pmdp .unreq pmd .unreq cur_ptep .unreq end_ptep .unreq pte .unreq valid /* Secondary CPUs end up here */ __idmap_kpti_secondary: /* Uninstall swapper before surgery begins */ __idmap_cpu_set_reserved_ttbr1 x16, x17 /* Increment the flag to let the boot CPU know we're ready */ 1: ldxr w16, [flag_ptr] add w16, w16, #1 stxr w17, w16, [flag_ptr] cbnz w17, 1b /* Wait for the boot CPU to finish messing around with swapper */ sevl 1: wfe ldxr w16, [flag_ptr] cbnz w16, 1b /* All done, act like nothing happened */ msr ttbr1_el1, swapper_ttb isb ret .unreq swapper_ttb .unreq flag_ptr SYM_FUNC_END(idmap_kpti_install_ng_mappings) .popsection #endif /* * __cpu_setup * * Initialise the processor for turning the MMU on. * * Input: * x0 - actual number of VA bits (ignored unless VA_BITS > 48) * Output: * Return in x0 the value of the SCTLR_EL1 register. */ .pushsection ".idmap.text", "awx" SYM_FUNC_START(__cpu_setup) tlbi vmalle1 // Invalidate local TLB dsb nsh mov x1, #3 << 20 msr cpacr_el1, x1 // Enable FP/ASIMD mov x1, #1 << 12 // Reset mdscr_el1 and disable msr mdscr_el1, x1 // access to the DCC from EL0 isb // Unmask debug exceptions now, enable_dbg // since this is per-cpu reset_pmuserenr_el0 x1 // Disable PMU access from EL0 reset_amuserenr_el0 x1 // Disable AMU access from EL0 /* * Default values for VMSA control registers. These will be adjusted * below depending on detected CPU features. */ mair .req x17 tcr .req x16 mov_q mair, MAIR_EL1_SET mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS tcr_clear_errata_bits tcr, x9, x5 #ifdef CONFIG_ARM64_VA_BITS_52 sub x9, xzr, x0 add x9, x9, #64 tcr_set_t1sz tcr, x9 #else idmap_get_t0sz x9 #endif tcr_set_t0sz tcr, x9 /* * Set the IPS bits in TCR_EL1. */ tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6 #ifdef CONFIG_ARM64_HW_AFDBM /* * Enable hardware update of the Access Flags bit. * Hardware dirty bit management is enabled later, * via capabilities. */ mrs x9, ID_AA64MMFR1_EL1 and x9, x9, #0xf cbz x9, 1f orr tcr, tcr, #TCR_HA // hardware Access flag update 1: #endif /* CONFIG_ARM64_HW_AFDBM */ msr mair_el1, mair msr tcr_el1, tcr /* * Prepare SCTLR */ mov_q x0, INIT_SCTLR_EL1_MMU_ON ret // return to head.S .unreq mair .unreq tcr SYM_FUNC_END(__cpu_setup)
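MAIR_EL1_SET above is built by packing one 8-bit attribute per memory-type index, each at bits [8*idx+7:8*idx]. A C model of that packing (the two attribute encodings shown are illustrative ARM ARM values: 0x00 Device-nGnRnE, 0xff Normal write-back):

#include <stdint.h>
#include <stdio.h>

/* Place an 8-bit memory attribute into byte 'idx' of a MAIR value. */
#define MAIR_ATTRIDX(attr, idx) ((uint64_t)(attr) << ((idx) * 8))

int main(void)
{
    uint64_t mair = MAIR_ATTRIDX(0x00, 0)    /* idx 0: Device-nGnRnE */
                  | MAIR_ATTRIDX(0xff, 1);   /* idx 1: Normal memory */
    printf("MAIR = %#llx\n", (unsigned long long)mair);   /* 0xff00 */
    return 0;
}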
aixcc-public/challenge-001-exemplar-source
1,939
arch/arm64/mm/trans_pgd-asm.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2021, Microsoft Corporation. * Pasha Tatashin <pasha.tatashin@soleen.com> */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/kvm_asm.h> .macro invalid_vector label SYM_CODE_START_LOCAL(\label) .align 7 b \label SYM_CODE_END(\label) .endm .macro el1_sync_vector SYM_CODE_START_LOCAL(el1_sync) .align 7 cmp x0, #HVC_SET_VECTORS /* Called from hibernate */ b.ne 1f msr vbar_el2, x1 mov x0, xzr eret 1: cmp x0, #HVC_SOFT_RESTART /* Called from kexec */ b.ne 2f mov x0, x2 mov x2, x4 mov x4, x1 mov x1, x3 br x4 2: /* Unexpected argument, set an error */ mov_q x0, HVC_STUB_ERR eret SYM_CODE_END(el1_sync) .endm SYM_CODE_START(trans_pgd_stub_vectors) invalid_vector hyp_stub_el2t_sync_invalid // Synchronous EL2t invalid_vector hyp_stub_el2t_irq_invalid // IRQ EL2t invalid_vector hyp_stub_el2t_fiq_invalid // FIQ EL2t invalid_vector hyp_stub_el2t_error_invalid // Error EL2t invalid_vector hyp_stub_el2h_sync_invalid // Synchronous EL2h invalid_vector hyp_stub_el2h_irq_invalid // IRQ EL2h invalid_vector hyp_stub_el2h_fiq_invalid // FIQ EL2h invalid_vector hyp_stub_el2h_error_invalid // Error EL2h el1_sync_vector // Synchronous 64-bit EL1 invalid_vector hyp_stub_el1_irq_invalid // IRQ 64-bit EL1 invalid_vector hyp_stub_el1_fiq_invalid // FIQ 64-bit EL1 invalid_vector hyp_stub_el1_error_invalid // Error 64-bit EL1 invalid_vector hyp_stub_32b_el1_sync_invalid // Synchronous 32-bit EL1 invalid_vector hyp_stub_32b_el1_irq_invalid // IRQ 32-bit EL1 invalid_vector hyp_stub_32b_el1_fiq_invalid // FIQ 32-bit EL1 invalid_vector hyp_stub_32b_el1_error_invalid // Error 32-bit EL1 .align 11 SYM_INNER_LABEL(__trans_pgd_stub_vectors_end, SYM_L_LOCAL) SYM_CODE_END(trans_pgd_stub_vectors) # Check the trans_pgd_stub_vectors didn't overflow .org . - (__trans_pgd_stub_vectors_end - trans_pgd_stub_vectors) + SZ_2K
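The closing .org directive above is an assemble-time size assertion: .org can only move the location counter forward, so the build fails if the sixteen 128-byte vector slots (from the .align 7 entries) outgrow 2KB. A C analogue of the same guard (illustrative types only):

#include <assert.h>

#define SZ_2K 2048

/* Sixteen vector slots of 128 bytes each, as laid out by ".align 7". */
struct stub_vectors { unsigned char entry[16][128]; };

static_assert(sizeof(struct stub_vectors) <= SZ_2K,
              "trans_pgd_stub_vectors overflowed");

int main(void) { return 0; }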
aixcc-public/challenge-001-exemplar-source
1,850
arch/arm64/kernel/vdso32/vdso.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Adapted from arm64 version. * * GNU linker script for the VDSO library. * Heavily based on the vDSO linker scripts for other archs. * * Copyright (C) 2012-2018 ARM Limited */ #include <linux/const.h> #include <asm/page.h> #include <asm/vdso.h> #include <asm-generic/vmlinux.lds.h> OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm") OUTPUT_ARCH(arm) SECTIONS { PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); #ifdef CONFIG_TIME_NS PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE); #endif . = VDSO_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note .dynamic : { *(.dynamic) } :text :dynamic .rodata : { *(.rodata*) *(.got) *(.got.plt) *(.plt) *(.rel.iplt) *(.iplt) *(.igot.plt) } :text .text : { *(.text*) *(.glue_7) *(.glue_7t) *(.vfp11_veneer) *(.v4_bx) } :text =0xe7f001f2 .rel.dyn : { *(.rel*) } .ARM.exidx : { *(.ARM.exidx*) } DWARF_DEBUG ELF_DETAILS .ARM.attributes 0 : { *(.ARM.attributes) } /DISCARD/ : { *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } } /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ } VERSION { LINUX_2.6 { global: __vdso_clock_gettime; __vdso_gettimeofday; __vdso_clock_getres; __vdso_clock_gettime64; local: *; }; }
aixcc-public/challenge-001-exemplar-source
2,349
arch/arm64/kernel/vdso/vdso.lds.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * GNU linker script for the VDSO library. * * Copyright (C) 2012 ARM Limited * * Author: Will Deacon <will.deacon@arm.com> * Heavily based on the vDSO linker scripts for other archs. */ #include <linux/const.h> #include <asm/page.h> #include <asm/vdso.h> #include <asm-generic/vmlinux.lds.h> OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64") OUTPUT_ARCH(aarch64) SECTIONS { PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); #ifdef CONFIG_TIME_NS PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); #endif . = VDSO_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } /* * Discard .note.gnu.property sections which are unused and have * different alignment requirement from vDSO note sections. */ /DISCARD/ : { *(.note.GNU-stack .note.gnu.property) } .note : { *(.note.*) } :text :note . = ALIGN(16); .text : { *(.text*) } :text =0xd503201f PROVIDE (__etext = .); PROVIDE (_etext = .); PROVIDE (etext = .); . = ALIGN(4); .altinstructions : { __alt_instructions = .; *(.altinstructions) __alt_instructions_end = .; } .dynamic : { *(.dynamic) } :text :dynamic .rela.dyn : ALIGN(8) { *(.rela .rela*) } .rodata : { *(.rodata*) *(.got) *(.got.plt) *(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt) } :text _end = .; PROVIDE(end = .); DWARF_DEBUG ELF_DETAILS /DISCARD/ : { *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) *(.eh_frame .eh_frame_hdr) } } /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ } /* * This controls what symbols we export from the DSO. */ VERSION { LINUX_2.6.39 { global: __kernel_rt_sigreturn; __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_getres; local: *; }; } /* * Make the sigreturn code visible to the kernel. */ VDSO_sigtramp = __kernel_rt_sigreturn;
aixcc-public/challenge-001-exemplar-source
3,040
arch/arm64/kernel/vdso/sigreturn.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Sigreturn trampoline for returning from a signal when the SA_RESTORER * flag is not set. It serves primarily as a hall of shame for crappy * unwinders and features an exciting but mysterious NOP instruction. * * It's also fragile as hell, so please think twice before changing anything * in here. * * Copyright (C) 2012 ARM Limited * * Author: Will Deacon <will.deacon@arm.com> */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/unistd.h> .text /* * NOTE!!! You may notice that all of the .cfi directives in this file have * been commented out. This is because they have been shown to trigger segfaults * in libgcc when unwinding out of a SIGCANCEL handler to invoke pthread * cleanup handlers during the thread cancellation dance. By omitting the * directives, we trigger an arm64-specific fallback path in the unwinder which * recognises the signal frame and restores many of the registers directly from * the sigcontext. Re-enabling the cfi directives here therefore needs to be * much more comprehensive to reduce the risk of further regressions. */ /* Ensure that the mysterious NOP can be associated with a function. */ // .cfi_startproc /* * .cfi_signal_frame causes the corresponding Frame Description Entry (FDE) in * the .eh_frame section to be annotated as a signal frame. This allows DWARF * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo() and identify * the next frame using the unmodified return address instead of subtracting 1, * which may yield the wrong FDE. */ // .cfi_signal_frame /* * Tell the unwinder where to locate the frame record linking back to the * interrupted context. We don't provide unwind info for registers other than * the frame pointer and the link register here; in practice, this is likely to * be insufficient for unwinding in C/C++ based runtimes, especially without a * means to restore the stack pointer. Thankfully, unwinders and debuggers * already have baked-in strategies for attempting to unwind out of signals. */ // .cfi_def_cfa x29, 0 // .cfi_offset x29, 0 * 8 // .cfi_offset x30, 1 * 8 /* * This mysterious NOP is required for some unwinders (e.g. libc++) that * unconditionally subtract one from the result of _Unwind_GetIP() in order to * identify the calling function. * Hack borrowed from arch/powerpc/kernel/vdso64/sigtramp.S. */ nop // Mysterious NOP /* * GDB, libgcc and libunwind rely on being able to identify the sigreturn * instruction sequence to unwind from signal handlers. We cannot, therefore, * use SYM_FUNC_START() here, as it will emit a BTI C instruction and break the * unwinder. Thankfully, this function is only ever called from a RET and so * omitting the landing pad is perfectly fine. */ SYM_CODE_START(__kernel_rt_sigreturn) // PLEASE DO NOT MODIFY mov x8, #__NR_rt_sigreturn // PLEASE DO NOT MODIFY svc #0 // PLEASE DO NOT MODIFY // .cfi_endproc SYM_CODE_END(__kernel_rt_sigreturn) emit_aarch64_feature_1_and
aixcc-public/challenge-001-exemplar-source
1,852
arch/arm64/kernel/probes/kprobes_trampoline.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * trampoline entry and return code for kretprobes. */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> .text .macro save_all_base_regs stp x0, x1, [sp, #S_X0] stp x2, x3, [sp, #S_X2] stp x4, x5, [sp, #S_X4] stp x6, x7, [sp, #S_X6] stp x8, x9, [sp, #S_X8] stp x10, x11, [sp, #S_X10] stp x12, x13, [sp, #S_X12] stp x14, x15, [sp, #S_X14] stp x16, x17, [sp, #S_X16] stp x18, x19, [sp, #S_X18] stp x20, x21, [sp, #S_X20] stp x22, x23, [sp, #S_X22] stp x24, x25, [sp, #S_X24] stp x26, x27, [sp, #S_X26] stp x28, x29, [sp, #S_X28] add x0, sp, #PT_REGS_SIZE stp lr, x0, [sp, #S_LR] /* * Construct a useful saved PSTATE */ mrs x0, nzcv mrs x1, daif orr x0, x0, x1 mrs x1, CurrentEL orr x0, x0, x1 mrs x1, SPSel orr x0, x0, x1 stp xzr, x0, [sp, #S_PC] .endm .macro restore_all_base_regs ldr x0, [sp, #S_PSTATE] and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT) msr nzcv, x0 ldp x0, x1, [sp, #S_X0] ldp x2, x3, [sp, #S_X2] ldp x4, x5, [sp, #S_X4] ldp x6, x7, [sp, #S_X6] ldp x8, x9, [sp, #S_X8] ldp x10, x11, [sp, #S_X10] ldp x12, x13, [sp, #S_X12] ldp x14, x15, [sp, #S_X14] ldp x16, x17, [sp, #S_X16] ldp x18, x19, [sp, #S_X18] ldp x20, x21, [sp, #S_X20] ldp x22, x23, [sp, #S_X22] ldp x24, x25, [sp, #S_X24] ldp x26, x27, [sp, #S_X26] ldp x28, x29, [sp, #S_X28] .endm SYM_CODE_START(__kretprobe_trampoline) sub sp, sp, #PT_REGS_SIZE save_all_base_regs /* Setup a frame pointer. */ add x29, sp, #S_FP mov x0, sp bl trampoline_probe_handler /* * Replace trampoline address in lr with actual orig_ret_addr return * address. */ mov lr, x0 /* The frame pointer (x29) is restored with other registers. */ restore_all_base_regs add sp, sp, #PT_REGS_SIZE ret SYM_CODE_END(__kretprobe_trampoline)
aixcc-public/challenge-001-exemplar-source
5,939
arch/arm64/kvm/hyp/entry.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> */ #include <linux/linkage.h> #include <asm/alternative.h> #include <asm/assembler.h> #include <asm/fpsimdmacros.h> #include <asm/kvm.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_mmu.h> #include <asm/kvm_mte.h> #include <asm/kvm_ptrauth.h> .text /* * u64 __guest_enter(struct kvm_vcpu *vcpu); */ SYM_FUNC_START(__guest_enter) // x0: vcpu // x1-x17: clobbered by macros // x29: guest context adr_this_cpu x1, kvm_hyp_ctxt, x2 // Store the hyp regs save_callee_saved_regs x1 // Save hyp's sp_el0 save_sp_el0 x1, x2 // Now that the hyp state is stored, a pending RAS SError must // affect the host or hyp. If any asynchronous exception is pending we // defer the guest entry. The DSB isn't necessary before v8.2 as any // SError would be fatal. alternative_if ARM64_HAS_RAS_EXTN dsb nshst isb alternative_else_nop_endif mrs x1, isr_el1 cbz x1, 1f mov x0, #ARM_EXCEPTION_IRQ ret 1: set_loaded_vcpu x0, x1, x2 add x29, x0, #VCPU_CONTEXT // mte_switch_to_guest(g_ctxt, h_ctxt, tmp1) mte_switch_to_guest x29, x1, x2 // Macro ptrauth_switch_to_guest format: // ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3) // The below macro to restore guest keys is not implemented in C code // as it may cause Pointer Authentication key signing mismatch errors // when this feature is enabled for kernel code. ptrauth_switch_to_guest x29, x0, x1, x2 // Restore the guest's sp_el0 restore_sp_el0 x29, x0 // Restore guest regs x0-x17 ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)] ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)] ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)] ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)] ldp x8, x9, [x29, #CPU_XREG_OFFSET(8)] ldp x10, x11, [x29, #CPU_XREG_OFFSET(10)] ldp x12, x13, [x29, #CPU_XREG_OFFSET(12)] ldp x14, x15, [x29, #CPU_XREG_OFFSET(14)] ldp x16, x17, [x29, #CPU_XREG_OFFSET(16)] // Restore guest regs x18-x29, lr restore_callee_saved_regs x29 // Do not touch any register after this! eret sb SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL) // x2-x29,lr: vcpu regs // vcpu x0-x1 on the stack // If the hyp context is loaded, go straight to hyp_panic get_loaded_vcpu x0, x1 cbnz x0, 1f b hyp_panic 1: // The hyp context is saved so make sure it is restored to allow // hyp_panic to run at hyp and, subsequently, panic to run in the host. // This makes use of __guest_exit to avoid duplication but sets the // return address to tail call into hyp_panic. As a side effect, the // current state is saved to the guest context but it will only be // accurate if the guest had been completely restored. 
adr_this_cpu x0, kvm_hyp_ctxt, x1 adr_l x1, hyp_panic str x1, [x0, #CPU_XREG_OFFSET(30)] get_vcpu_ptr x1, x0 SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL) // x0: return code // x1: vcpu // x2-x29,lr: vcpu regs // vcpu x0-x1 on the stack add x1, x1, #VCPU_CONTEXT ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN) // Store the guest regs x2 and x3 stp x2, x3, [x1, #CPU_XREG_OFFSET(2)] // Retrieve the guest regs x0-x1 from the stack ldp x2, x3, [sp], #16 // x0, x1 // Store the guest regs x0-x1 and x4-x17 stp x2, x3, [x1, #CPU_XREG_OFFSET(0)] stp x4, x5, [x1, #CPU_XREG_OFFSET(4)] stp x6, x7, [x1, #CPU_XREG_OFFSET(6)] stp x8, x9, [x1, #CPU_XREG_OFFSET(8)] stp x10, x11, [x1, #CPU_XREG_OFFSET(10)] stp x12, x13, [x1, #CPU_XREG_OFFSET(12)] stp x14, x15, [x1, #CPU_XREG_OFFSET(14)] stp x16, x17, [x1, #CPU_XREG_OFFSET(16)] // Store the guest regs x18-x29, lr save_callee_saved_regs x1 // Store the guest's sp_el0 save_sp_el0 x1, x2 adr_this_cpu x2, kvm_hyp_ctxt, x3 // Macro ptrauth_switch_to_hyp format: // ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3) // The below macro to save/restore keys is not implemented in C code // as it may cause Pointer Authentication key signing mismatch errors // when this feature is enabled for kernel code. ptrauth_switch_to_hyp x1, x2, x3, x4, x5 // mte_switch_to_hyp(g_ctxt, h_ctxt, reg1) mte_switch_to_hyp x1, x2, x3 // Restore hyp's sp_el0 restore_sp_el0 x2, x3 // Now restore the hyp regs restore_callee_saved_regs x2 set_loaded_vcpu xzr, x2, x3 alternative_if ARM64_HAS_RAS_EXTN // If we have the RAS extensions we can consume a pending error // without an unmask-SError and isb. The ESB-instruction consumed any // pending guest error when we took the exception from the guest. mrs_s x2, SYS_DISR_EL1 str x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)] cbz x2, 1f msr_s SYS_DISR_EL1, xzr orr x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT) 1: ret alternative_else dsb sy // Synchronize against in-flight ld/st isb // Prevent an early read of side-effect free ISR mrs x2, isr_el1 tbnz x2, #8, 2f // ISR_EL1.A ret nop 2: alternative_endif // We know we have a pending asynchronous abort, now is the // time to flush it out. From your VAXorcist book, page 666: // "Threaten me not, oh Evil one! For I speak with // the power of DEC, and I command thee to show thyself!" mrs x2, elr_el2 mrs x3, esr_el2 mrs x4, spsr_el2 mov x5, x0 msr daifclr, #4 // Unmask aborts // This is our single instruction exception window. A pending // SError is guaranteed to occur at the earliest when we unmask // it, and at the latest just after the ISB. abort_guest_exit_start: isb abort_guest_exit_end: msr daifset, #4 // Mask aborts ret _kvm_extable abort_guest_exit_start, 9997f _kvm_extable abort_guest_exit_end, 9997f 9997: msr daifset, #4 // Mask aborts mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) // restore the EL1 exception context so that we can report some // information. Merge the exception code with the SError pending bit. msr elr_el2, x2 msr esr_el2, x3 msr spsr_el2, x4 orr x0, x0, x5 1: ret SYM_FUNC_END(__guest_enter)
aixcc-public/challenge-001-exemplar-source
5,814
arch/arm64/kvm/hyp/hyp-entry.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015-2018 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> */ #include <linux/arm-smccc.h> #include <linux/linkage.h> #include <asm/alternative.h> #include <asm/assembler.h> #include <asm/cpufeature.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/mmu.h> #include <asm/spectre.h> .macro save_caller_saved_regs_vect /* x0 and x1 were saved in the vector entry */ stp x2, x3, [sp, #-16]! stp x4, x5, [sp, #-16]! stp x6, x7, [sp, #-16]! stp x8, x9, [sp, #-16]! stp x10, x11, [sp, #-16]! stp x12, x13, [sp, #-16]! stp x14, x15, [sp, #-16]! stp x16, x17, [sp, #-16]! .endm .macro restore_caller_saved_regs_vect ldp x16, x17, [sp], #16 ldp x14, x15, [sp], #16 ldp x12, x13, [sp], #16 ldp x10, x11, [sp], #16 ldp x8, x9, [sp], #16 ldp x6, x7, [sp], #16 ldp x4, x5, [sp], #16 ldp x2, x3, [sp], #16 ldp x0, x1, [sp], #16 .endm .text el1_sync: // Guest trapped into EL2 mrs x0, esr_el2 ubfx x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH cmp x0, #ESR_ELx_EC_HVC64 ccmp x0, #ESR_ELx_EC_HVC32, #4, ne b.ne el1_trap /* * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. * The workaround has already been applied on the host, * so let's quickly get back to the guest. We don't bother * restoring x1, as it can be clobbered anyway. */ ldr x1, [sp] // Guest's x0 eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1 cbz w1, wa_epilogue /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ ARM_SMCCC_ARCH_WORKAROUND_2) cbz w1, wa_epilogue eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \ ARM_SMCCC_ARCH_WORKAROUND_3) cbnz w1, el1_trap wa_epilogue: mov x0, xzr add sp, sp, #16 eret sb el1_trap: get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_TRAP b __guest_exit el1_irq: el1_fiq: get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_IRQ b __guest_exit el1_error: get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_EL1_SERROR b __guest_exit el2_sync: /* Check for illegal exception return */ mrs x0, spsr_el2 tbnz x0, #20, 1f save_caller_saved_regs_vect stp x29, x30, [sp, #-16]! bl kvm_unexpected_el2_exception ldp x29, x30, [sp], #16 restore_caller_saved_regs_vect eret 1: /* Let's attempt a recovery from the illegal exception return */ get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_IL b __guest_exit el2_error: save_caller_saved_regs_vect stp x29, x30, [sp, #-16]! bl kvm_unexpected_el2_exception ldp x29, x30, [sp], #16 restore_caller_saved_regs_vect eret sb .macro invalid_vector label, target = __guest_exit_panic .align 2 SYM_CODE_START_LOCAL(\label) b \target SYM_CODE_END(\label) .endm /* None of these should ever happen */ invalid_vector el2t_sync_invalid invalid_vector el2t_irq_invalid invalid_vector el2t_fiq_invalid invalid_vector el2t_error_invalid invalid_vector el2h_irq_invalid invalid_vector el2h_fiq_invalid .ltorg .align 11 .macro check_preamble_length start, end /* kvm_patch_vector_branch() generates code that jumps over the preamble. */ .if ((\end-\start) != KVM_VECTOR_PREAMBLE) .error "KVM vector preamble length mismatch" .endif .endm .macro valid_vect target .align 7 661: esb stp x0, x1, [sp, #-16]! 662: b \target check_preamble_length 661b, 662b .endm .macro invalid_vect target .align 7 661: nop stp x0, x1, [sp, #-16]! 
662: b \target check_preamble_length 661b, 662b .endm SYM_CODE_START(__kvm_hyp_vector) invalid_vect el2t_sync_invalid // Synchronous EL2t invalid_vect el2t_irq_invalid // IRQ EL2t invalid_vect el2t_fiq_invalid // FIQ EL2t invalid_vect el2t_error_invalid // Error EL2t valid_vect el2_sync // Synchronous EL2h invalid_vect el2h_irq_invalid // IRQ EL2h invalid_vect el2h_fiq_invalid // FIQ EL2h valid_vect el2_error // Error EL2h valid_vect el1_sync // Synchronous 64-bit EL1 valid_vect el1_irq // IRQ 64-bit EL1 valid_vect el1_fiq // FIQ 64-bit EL1 valid_vect el1_error // Error 64-bit EL1 valid_vect el1_sync // Synchronous 32-bit EL1 valid_vect el1_irq // IRQ 32-bit EL1 valid_vect el1_fiq // FIQ 32-bit EL1 valid_vect el1_error // Error 32-bit EL1 SYM_CODE_END(__kvm_hyp_vector) .macro spectrev2_smccc_wa1_smc sub sp, sp, #(8 * 4) stp x2, x3, [sp, #(8 * 0)] stp x0, x1, [sp, #(8 * 2)] alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_wa3 /* Patched to mov WA3 when supported */ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 alternative_cb_end smc #0 ldp x2, x3, [sp, #(8 * 0)] add sp, sp, #(8 * 2) .endm .macro hyp_ventry indirect, spectrev2 .align 7 1: esb .if \spectrev2 != 0 spectrev2_smccc_wa1_smc .else stp x0, x1, [sp, #-16]! mitigate_spectre_bhb_loop x0 mitigate_spectre_bhb_clear_insn .endif .if \indirect != 0 alternative_cb ARM64_ALWAYS_SYSTEM, kvm_patch_vector_branch /* * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with: * * movz x0, #(addr & 0xffff) * movk x0, #((addr >> 16) & 0xffff), lsl #16 * movk x0, #((addr >> 32) & 0xffff), lsl #32 * br x0 * * Where: * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE. * See kvm_patch_vector_branch for details. */ nop nop nop nop alternative_cb_end .endif b __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE) .endm .macro generate_vectors indirect, spectrev2 0: .rept 16 hyp_ventry \indirect, \spectrev2 .endr .org 0b + SZ_2K // Safety measure .endm .align 11 SYM_CODE_START(__bp_harden_hyp_vecs) generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT 1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ .org 1b SYM_CODE_END(__bp_harden_hyp_vecs)
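The el1_sync fast path above compares one register against several SMCCC function IDs without ever reloading it: after the first EOR, each subsequent EOR with (previous ^ next) re-targets the zero test to the next constant. A C model of the chain (the constants below mirror the ARM_SMCCC_ARCH_WORKAROUND IDs; treat the exact values as assumptions for this sketch):

#include <stdint.h>
#include <stdio.h>

#define WA1 0x80008000u
#define WA2 0x80007fffu
#define WA3 0x80003fffu

static int classify(uint32_t x)
{
    x ^= WA1;                 /* "eor w1, w1, #WORKAROUND_1" */
    if (x == 0) return 1;
    x ^= WA1 ^ WA2;           /* now x == original ^ WA2 */
    if (x == 0) return 2;
    x ^= WA2 ^ WA3;           /* now x == original ^ WA3 */
    if (x == 0) return 3;
    return 0;                 /* none matched: fall through to el1_trap */
}

int main(void)
{
    printf("%d %d %d %d\n", classify(WA1), classify(WA2),
           classify(WA3), classify(0));        /* 1 2 3 0 */
    return 0;
}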
aixcc-public/challenge-001-exemplar-source
5,700
arch/arm64/kvm/hyp/nvhe/hyp-init.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012,2013 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> */ #include <linux/arm-smccc.h> #include <linux/linkage.h> #include <asm/alternative.h> #include <asm/assembler.h> #include <asm/el2_setup.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_mmu.h> #include <asm/pgtable-hwdef.h> #include <asm/sysreg.h> #include <asm/virt.h> .text .pushsection .idmap.text, "ax" .align 11 SYM_CODE_START(__kvm_hyp_init) ventry __invalid // Synchronous EL2t ventry __invalid // IRQ EL2t ventry __invalid // FIQ EL2t ventry __invalid // Error EL2t ventry __invalid // Synchronous EL2h ventry __invalid // IRQ EL2h ventry __invalid // FIQ EL2h ventry __invalid // Error EL2h ventry __do_hyp_init // Synchronous 64-bit EL1 ventry __invalid // IRQ 64-bit EL1 ventry __invalid // FIQ 64-bit EL1 ventry __invalid // Error 64-bit EL1 ventry __invalid // Synchronous 32-bit EL1 ventry __invalid // IRQ 32-bit EL1 ventry __invalid // FIQ 32-bit EL1 ventry __invalid // Error 32-bit EL1 __invalid: b . /* * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers. * * x0: SMCCC function ID * x1: struct kvm_nvhe_init_params PA */ __do_hyp_init: /* Check for a stub HVC call */ cmp x0, #HVC_STUB_HCALL_NR b.lo __kvm_handle_stub_hvc mov x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) cmp x0, x3 b.eq 1f mov x0, #SMCCC_RET_NOT_SUPPORTED eret 1: mov x0, x1 mov x3, lr bl ___kvm_hyp_init // Clobbers x0..x2 mov lr, x3 /* Hello, World! */ mov x0, #SMCCC_RET_SUCCESS eret SYM_CODE_END(__kvm_hyp_init) /* * Initialize the hypervisor in EL2. * * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers * and leave x3 for the caller. * * x0: struct kvm_nvhe_init_params PA */ SYM_CODE_START_LOCAL(___kvm_hyp_init) ldr x1, [x0, #NVHE_INIT_TPIDR_EL2] msr tpidr_el2, x1 ldr x1, [x0, #NVHE_INIT_STACK_HYP_VA] mov sp, x1 ldr x1, [x0, #NVHE_INIT_MAIR_EL2] msr mair_el2, x1 ldr x1, [x0, #NVHE_INIT_HCR_EL2] msr hcr_el2, x1 ldr x1, [x0, #NVHE_INIT_VTTBR] msr vttbr_el2, x1 ldr x1, [x0, #NVHE_INIT_VTCR] msr vtcr_el2, x1 ldr x1, [x0, #NVHE_INIT_PGD_PA] phys_to_ttbr x2, x1 alternative_if ARM64_HAS_CNP orr x2, x2, #TTBR_CNP_BIT alternative_else_nop_endif msr ttbr0_el2, x2 /* * Set the PS bits in TCR_EL2. */ ldr x0, [x0, #NVHE_INIT_TCR_EL2] tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2 msr tcr_el2, x0 isb /* Invalidate the stale TLBs from Bootloader */ tlbi alle2 tlbi vmalls12e1 dsb sy mov_q x0, INIT_SCTLR_EL2_MMU_ON alternative_if ARM64_HAS_ADDRESS_AUTH mov_q x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB) orr x0, x0, x1 alternative_else_nop_endif msr sctlr_el2, x0 isb /* Set the host vector */ ldr x0, =__kvm_hyp_host_vector msr vbar_el2, x0 ret SYM_CODE_END(___kvm_hyp_init) /* * PSCI CPU_ON entry point * * x0: struct kvm_nvhe_init_params PA */ SYM_CODE_START(kvm_hyp_cpu_entry) mov x1, #1 // is_cpu_on = true b __kvm_hyp_init_cpu SYM_CODE_END(kvm_hyp_cpu_entry) /* * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point * * x0: struct kvm_nvhe_init_params PA */ SYM_CODE_START(kvm_hyp_cpu_resume) mov x1, #0 // is_cpu_on = false b __kvm_hyp_init_cpu SYM_CODE_END(kvm_hyp_cpu_resume) /* * Common code for CPU entry points. Initializes EL2 state and * installs the hypervisor before handing over to a C handler. * * x0: struct kvm_nvhe_init_params PA * x1: bool is_cpu_on */ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) mov x28, x0 // Stash arguments mov x29, x1 /* Check that the core was booted in EL2. 
*/ mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.eq 2f /* The core booted in EL1. KVM cannot be initialized on it. */ 1: wfe wfi b 1b 2: msr SPsel, #1 // We want to use SP_EL{1,2} /* Initialize EL2 CPU state to sane values. */ init_el2_state // Clobbers x0..x2 /* Enable MMU, set vectors and stack. */ mov x0, x28 bl ___kvm_hyp_init // Clobbers x0..x2 /* Leave idmap. */ mov x0, x29 ldr x1, =kvm_host_psci_cpu_entry br x1 SYM_CODE_END(__kvm_hyp_init_cpu) SYM_CODE_START(__kvm_handle_stub_hvc) cmp x0, #HVC_SOFT_RESTART b.ne 1f /* This is where we're about to jump, staying at EL2 */ msr elr_el2, x1 mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h) msr spsr_el2, x0 /* Shuffle the arguments, and don't come back */ mov x0, x2 mov x1, x3 mov x2, x4 b reset 1: cmp x0, #HVC_RESET_VECTORS b.ne 1f /* * Set the HVC_RESET_VECTORS return code before entering the common * path so that we do not clobber x0-x2 in case we are coming via * HVC_SOFT_RESTART. */ mov x0, xzr reset: /* Reset kvm back to the hyp stub. */ mov_q x5, INIT_SCTLR_EL2_MMU_OFF pre_disable_mmu_workaround msr sctlr_el2, x5 isb alternative_if ARM64_KVM_PROTECTED_MODE mov_q x5, HCR_HOST_NVHE_FLAGS msr hcr_el2, x5 alternative_else_nop_endif /* Install stub vectors */ adr_l x5, __hyp_stub_vectors msr vbar_el2, x5 eret 1: /* Bad stub call */ mov_q x0, HVC_STUB_ERR eret SYM_CODE_END(__kvm_handle_stub_hvc) SYM_FUNC_START(__pkvm_init_switch_pgd) /* Turn the MMU off */ pre_disable_mmu_workaround mrs x2, sctlr_el2 bic x3, x2, #SCTLR_ELx_M msr sctlr_el2, x3 isb tlbi alle2 /* Install the new pgtables */ ldr x3, [x0, #NVHE_INIT_PGD_PA] phys_to_ttbr x4, x3 alternative_if ARM64_HAS_CNP orr x4, x4, #TTBR_CNP_BIT alternative_else_nop_endif msr ttbr0_el2, x4 /* Set the new stack pointer */ ldr x0, [x0, #NVHE_INIT_STACK_HYP_VA] mov sp, x0 /* And turn the MMU back on! */ set_sctlr_el2 x2 ret x1 SYM_FUNC_END(__pkvm_init_switch_pgd) .popsection
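The __do_hyp_init dispatch above reads naturally as a small C routine. This sketch only shows the control flow; the helper names (handle_stub_hvc, hyp_init) and the function-ID value are assumptions standing in for __kvm_handle_stub_hvc, ___kvm_hyp_init, and KVM_HOST_SMCCC_FUNC(__kvm_hyp_init).
#include <stdint.h>
#define HVC_STUB_HCALL_NR	2u		/* assumed stub-call boundary */
#define KVM_FUNC_HYP_INIT	0xc6000001u	/* stand-in SMCCC function ID */
#define SMCCC_RET_SUCCESS	0
#define SMCCC_RET_NOT_SUPPORTED	(-1)
extern long handle_stub_hvc(uint64_t fn);	/* models __kvm_handle_stub_hvc */
extern void hyp_init(uint64_t params_pa);	/* models ___kvm_hyp_init */
long do_hyp_init(uint64_t fn, uint64_t params_pa)
{
	if (fn < HVC_STUB_HCALL_NR)		/* stub HVCs use the low IDs */
		return handle_stub_hvc(fn);
	if (fn != KVM_FUNC_HYP_INIT)
		return SMCCC_RET_NOT_SUPPORTED;
	hyp_init(params_pa);			/* set up EL2 state, MMU on */
	return SMCCC_RET_SUCCESS;
}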
aixcc-public/challenge-001-exemplar-source
7,197
arch/arm64/kvm/hyp/nvhe/host.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - Google Inc * Author: Andrew Scull <ascull@google.com> */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_mmu.h> .text SYM_FUNC_START(__host_exit) get_host_ctxt x0, x1 /* Store the host regs x2 and x3 */ stp x2, x3, [x0, #CPU_XREG_OFFSET(2)] /* Retrieve the host regs x0-x1 from the stack */ ldp x2, x3, [sp], #16 // x0, x1 /* Store the host regs x0-x1 and x4-x17 */ stp x2, x3, [x0, #CPU_XREG_OFFSET(0)] stp x4, x5, [x0, #CPU_XREG_OFFSET(4)] stp x6, x7, [x0, #CPU_XREG_OFFSET(6)] stp x8, x9, [x0, #CPU_XREG_OFFSET(8)] stp x10, x11, [x0, #CPU_XREG_OFFSET(10)] stp x12, x13, [x0, #CPU_XREG_OFFSET(12)] stp x14, x15, [x0, #CPU_XREG_OFFSET(14)] stp x16, x17, [x0, #CPU_XREG_OFFSET(16)] /* Store the host regs x18-x29, lr */ save_callee_saved_regs x0 /* Save the host context pointer in x29 across the function call */ mov x29, x0 bl handle_trap /* Restore host regs x0-x17 */ __host_enter_restore_full: ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)] ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)] ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)] ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)] /* x0-7 are used for panic arguments */ __host_enter_for_panic: ldp x8, x9, [x29, #CPU_XREG_OFFSET(8)] ldp x10, x11, [x29, #CPU_XREG_OFFSET(10)] ldp x12, x13, [x29, #CPU_XREG_OFFSET(12)] ldp x14, x15, [x29, #CPU_XREG_OFFSET(14)] ldp x16, x17, [x29, #CPU_XREG_OFFSET(16)] /* Restore host regs x18-x29, lr */ restore_callee_saved_regs x29 /* Do not touch any register after this! */ __host_enter_without_restoring: eret sb SYM_FUNC_END(__host_exit) /* * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt); */ SYM_FUNC_START(__host_enter) mov x29, x0 b __host_enter_restore_full SYM_FUNC_END(__host_enter) /* * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr, * u64 elr, u64 par); */ SYM_FUNC_START(__hyp_do_panic) /* Prepare and exit to the host's panic function. */ mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ PSR_MODE_EL1h) msr spsr_el2, lr adr_l lr, nvhe_hyp_panic_handler hyp_kimg_va lr, x6 msr elr_el2, lr mov x29, x0 #ifdef CONFIG_NVHE_EL2_DEBUG /* Ensure host stage-2 is disabled */ mrs x0, hcr_el2 bic x0, x0, #HCR_VM msr hcr_el2, x0 isb tlbi vmalls12e1 dsb nsh #endif /* Load the panic arguments into x0-7 */ mrs x0, esr_el2 mov x4, x3 mov x3, x2 hyp_pa x3, x6 get_vcpu_ptr x5, x6 mrs x6, far_el2 mrs x7, hpfar_el2 /* Enter the host, conditionally restoring the host context. */ cbz x29, __host_enter_without_restoring b __host_enter_for_panic SYM_FUNC_END(__hyp_do_panic) SYM_FUNC_START(__host_hvc) ldp x0, x1, [sp] // Don't fixup the stack yet /* No stub for you, sonny Jim */ alternative_if ARM64_KVM_PROTECTED_MODE b __host_exit alternative_else_nop_endif /* Check for a stub HVC call */ cmp x0, #HVC_STUB_HCALL_NR b.hs __host_exit add sp, sp, #16 /* * Compute the idmap address of __kvm_handle_stub_hvc and * jump there. * * Preserve x0-x4, which may contain stub parameters. */ adr_l x5, __kvm_handle_stub_hvc hyp_pa x5, x6 br x5 SYM_FUNC_END(__host_hvc) .macro host_el1_sync_vect .align 7 .L__vect_start\@: stp x0, x1, [sp, #-16]! 
mrs x0, esr_el2 ubfx x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH cmp x0, #ESR_ELx_EC_HVC64 b.eq __host_hvc b __host_exit .L__vect_end\@: .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80) .error "host_el1_sync_vect larger than vector entry" .endif .endm .macro invalid_host_el2_vect .align 7 /* * Test whether the SP has overflowed, without corrupting a GPR. * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit * of SP should always be 1. */ add sp, sp, x0 // sp' = sp + x0 sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp tbz x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@ sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp /* If a guest is loaded, panic out of it. */ stp x0, x1, [sp, #-16]! get_loaded_vcpu x0, x1 cbnz x0, __guest_exit_panic add sp, sp, #16 /* * The panic may not be clean if the exception is taken before the host * context has been saved by __host_exit or after the hyp context has * been partially clobbered by __host_enter. */ b hyp_panic .L__hyp_sp_overflow\@: /* Switch to the overflow stack */ adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0 b hyp_panic_bad_stack ASM_BUG() .endm .macro invalid_host_el1_vect .align 7 mov x0, xzr /* restore_host = false */ mrs x1, spsr_el2 mrs x2, elr_el2 mrs x3, par_el1 b __hyp_do_panic .endm /* * The host vector does not use an ESB instruction in order to avoid consuming * SErrors that should only be consumed by the host. Guest entry is deferred by * __guest_enter if there are any pending asynchronous exceptions so hyp will * always return to the host without having consumed host SErrors. * * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the * host knows about the EL2 vectors already, and there is no point in hiding * them. */ .align 11 SYM_CODE_START(__kvm_hyp_host_vector) invalid_host_el2_vect // Synchronous EL2t invalid_host_el2_vect // IRQ EL2t invalid_host_el2_vect // FIQ EL2t invalid_host_el2_vect // Error EL2t invalid_host_el2_vect // Synchronous EL2h invalid_host_el2_vect // IRQ EL2h invalid_host_el2_vect // FIQ EL2h invalid_host_el2_vect // Error EL2h host_el1_sync_vect // Synchronous 64-bit EL1/EL0 invalid_host_el1_vect // IRQ 64-bit EL1/EL0 invalid_host_el1_vect // FIQ 64-bit EL1/EL0 invalid_host_el1_vect // Error 64-bit EL1/EL0 host_el1_sync_vect // Synchronous 32-bit EL1/EL0 invalid_host_el1_vect // IRQ 32-bit EL1/EL0 invalid_host_el1_vect // FIQ 32-bit EL1/EL0 invalid_host_el1_vect // Error 32-bit EL1/EL0 SYM_CODE_END(__kvm_hyp_host_vector) /* * Forward SMC with arguments in struct kvm_cpu_context, and * store the result into the same struct. Assumes SMCCC 1.2 or older. * * x0: struct kvm_cpu_context* */ SYM_CODE_START(__kvm_hyp_host_forward_smc) /* * Use x18 to keep the pointer to the host context because * x18 is callee-saved in SMCCC but not in AAPCS64. 
*/ mov x18, x0 ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)] ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)] ldp x4, x5, [x18, #CPU_XREG_OFFSET(4)] ldp x6, x7, [x18, #CPU_XREG_OFFSET(6)] ldp x8, x9, [x18, #CPU_XREG_OFFSET(8)] ldp x10, x11, [x18, #CPU_XREG_OFFSET(10)] ldp x12, x13, [x18, #CPU_XREG_OFFSET(12)] ldp x14, x15, [x18, #CPU_XREG_OFFSET(14)] ldp x16, x17, [x18, #CPU_XREG_OFFSET(16)] smc #0 stp x0, x1, [x18, #CPU_XREG_OFFSET(0)] stp x2, x3, [x18, #CPU_XREG_OFFSET(2)] stp x4, x5, [x18, #CPU_XREG_OFFSET(4)] stp x6, x7, [x18, #CPU_XREG_OFFSET(6)] stp x8, x9, [x18, #CPU_XREG_OFFSET(8)] stp x10, x11, [x18, #CPU_XREG_OFFSET(10)] stp x12, x13, [x18, #CPU_XREG_OFFSET(12)] stp x14, x15, [x18, #CPU_XREG_OFFSET(14)] stp x16, x17, [x18, #CPU_XREG_OFFSET(16)] ret SYM_CODE_END(__kvm_hyp_host_forward_smc)
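A worked example of the register-free SP overflow probe in invalid_host_el2_vect above: the add/sub pair swaps sp and x0 arithmetically, the PAGE_SHIFT bit of the recovered sp is tested, and two more subtractions restore both values. This C sketch assumes 4 KiB pages (PAGE_SHIFT = 12) purely for illustration.
#include <stdint.h>
#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
static int sp_overflowed(uint64_t *spp, uint64_t *x0p)
{
	uint64_t sp = *spp, x0 = *x0p;
	sp = sp + x0;			/* add sp, sp, x0 */
	x0 = sp - x0;			/* sub x0, sp, x0 -> original sp */
	int overflow = !((x0 >> PAGE_SHIFT) & 1);	/* tbz x0, #PAGE_SHIFT */
	x0 = sp - x0;			/* sub x0, sp, x0 -> original x0 */
	sp = sp - x0;			/* sub sp, sp, x0 -> original sp */
	*spp = sp;			/* both values survive the probe */
	*x0p = x0;
	return overflow;
}
The design point is that the check consumes no scratch register: any GPR spill would itself need a valid stack, which is exactly what is in doubt here.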
aixcc-public/challenge-001-exemplar-source
7,159
arch/xtensa/kernel/head.S
/* * arch/xtensa/kernel/head.S * * Xtensa Processor startup code. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2008 Tensilica Inc. * * Chris Zankel <chris@zankel.net> * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> * Kevin Chea */ #include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/cacheasm.h> #include <asm/initialize_mmu.h> #include <asm/mxregs.h> #include <linux/init.h> #include <linux/linkage.h> /* * This module contains the entry code for kernel images. It performs the * minimal setup needed to call the generic C routines. * * Prerequisites: * * - The kernel image has been loaded to the actual address where it was * compiled to. * - a2 contains either 0 or a pointer to a list of boot parameters. * (see setup.c for more details) * */ /* * _start * * The bootloader passes a pointer to a list of boot parameters in a2. */ /* The first bytes of the kernel image must be an instruction, so we * manually allocate and define the literal constant we need for a jx * instruction. */ __HEAD .begin no-absolute-literals ENTRY(_start) /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ wsr a2, excsave1 _j _SetupOCD .align 4 .literal_position _SetupOCD: /* * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow * xt-gdb to single step via DEBUG exceptions received directly * by ocd. */ #if XCHAL_HAVE_WINDOWED movi a1, 1 movi a0, 0 wsr a1, windowstart wsr a0, windowbase rsync #endif movi a1, LOCKLEVEL wsr a1, ps rsync .global _SetupMMU _SetupMMU: Offset = _SetupMMU - _start #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX initialize_mmu #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY rsr a2, excsave1 movi a3, XCHAL_KSEG_PADDR bltu a2, a3, 1f sub a2, a2, a3 movi a3, XCHAL_KSEG_SIZE bgeu a2, a3, 1f movi a3, XCHAL_KSEG_CACHED_VADDR add a2, a2, a3 wsr a2, excsave1 1: #endif #endif movi a0, _startup jx a0 ENDPROC(_start) .end no-absolute-literals __REF .literal_position ENTRY(_startup) /* Set a0 to 0 for the remaining initialization. */ movi a0, 0 #if XCHAL_HAVE_VECBASE movi a2, VECBASE_VADDR wsr a2, vecbase #endif /* Clear debugging registers. */ #if XCHAL_HAVE_DEBUG #if XCHAL_NUM_IBREAK > 0 wsr a0, ibreakenable #endif wsr a0, icount movi a1, 15 wsr a0, icountlevel .set _index, 0 .rept XCHAL_NUM_DBREAK wsr a0, SREG_DBREAKC + _index .set _index, _index + 1 .endr #endif /* Clear CCOUNT (not really necessary, but nice) */ wsr a0, ccount # not really necessary, but nice /* Disable zero-loops. */ #if XCHAL_HAVE_LOOPS wsr a0, lcount #endif /* Disable all timers. */ .set _index, 0 .rept XCHAL_NUM_TIMERS wsr a0, SREG_CCOMPARE + _index .set _index, _index + 1 .endr /* Interrupt initialization. */ movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE wsr a0, intenable wsr a2, intclear /* Disable coprocessors. */ #if XCHAL_HAVE_CP wsr a0, cpenable #endif /* Initialize the caches. * a2, a3 are just working registers (clobbered). 
*/ #if XCHAL_DCACHE_LINE_LOCKABLE ___unlock_dcache_all a2 a3 #endif #if XCHAL_ICACHE_LINE_LOCKABLE ___unlock_icache_all a2 a3 #endif ___invalidate_dcache_all a2 a3 ___invalidate_icache_all a2 a3 isync initialize_cacheattr #ifdef CONFIG_HAVE_SMP movi a2, CCON # MX External Register to Configure Cache movi a3, 1 wer a3, a2 #endif /* Setup stack and enable window exceptions (keep irqs disabled) */ movi a1, start_info l32i a1, a1, 0 /* Disable interrupts. */ /* Enable window exceptions if kernel is built with windowed ABI. */ movi a2, KERNEL_PS_WOE_MASK | LOCKLEVEL wsr a2, ps rsync #ifdef CONFIG_SMP /* * Notice that with SMP we assume that the cores support PRID. */ rsr a2, prid bnez a2, .Lboot_secondary #endif /* CONFIG_SMP */ /* Unpack data sections * * The linker script used to build the Linux kernel image * creates a table located at __boot_reloc_table_start * that contains the information about what data needs to be unpacked. * * Uses a2-a7. */ movi a2, __boot_reloc_table_start movi a3, __boot_reloc_table_end 1: beq a2, a3, 3f # no more entries? l32i a4, a2, 0 # start destination (in RAM) l32i a5, a2, 4 # end destination (in RAM) l32i a6, a2, 8 # start source (in ROM) addi a2, a2, 12 # next entry beq a4, a5, 1b # skip, empty entry beq a4, a6, 1b # skip, source and dest. are the same 2: l32i a7, a6, 0 # load word addi a6, a6, 4 s32i a7, a4, 0 # store word addi a4, a4, 4 bltu a4, a5, 2b j 1b 3: /* All code and initialized data segments have been copied. * Now clear the BSS segment. */ movi a2, __bss_start # start of BSS movi a3, __bss_stop # end of BSS __loopt a2, a3, a4, 2 s32i a0, a2, 0 __endla a2, a3, 4 #if XCHAL_DCACHE_IS_WRITEBACK /* After unpacking, flush the writeback cache to memory so the * instructions/data are available. */ ___flush_dcache_all a2 a3 #endif memw isync ___invalidate_icache_all a2 a3 isync #ifdef CONFIG_XIP_KERNEL /* Setup bootstrap CPU stack in XIP kernel */ movi a1, start_info l32i a1, a1, 0 #endif movi abi_arg0, 0 xsr abi_arg0, excsave1 /* init_arch kick-starts the Linux kernel */ abi_call init_arch abi_call start_kernel should_never_return: j should_never_return #ifdef CONFIG_SMP .Lboot_secondary: movi a2, cpu_start_ccount 1: memw l32i a3, a2, 0 beqi a3, 0, 1b movi a3, 0 s32i a3, a2, 0 1: memw l32i a3, a2, 0 beqi a3, 0, 1b wsr a3, ccount movi a3, 0 s32i a3, a2, 0 memw movi abi_arg0, 0 wsr abi_arg0, excsave1 abi_call secondary_start_kernel j should_never_return #endif /* CONFIG_SMP */ ENDPROC(_startup) #ifdef CONFIG_HOTPLUG_CPU ENTRY(cpu_restart) #if XCHAL_DCACHE_IS_WRITEBACK ___flush_invalidate_dcache_all a2 a3 #else ___invalidate_dcache_all a2 a3 #endif memw movi a2, CCON # MX External Register to Configure Cache movi a3, 0 wer a3, a2 extw rsr a0, prid neg a2, a0 movi a3, cpu_start_id memw s32i a2, a3, 0 #if XCHAL_DCACHE_IS_WRITEBACK dhwbi a3, 0 #endif 1: memw l32i a2, a3, 0 dhi a3, 0 bne a2, a0, 1b /* * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow * xt-gdb to single step via DEBUG exceptions received directly * by ocd. 
*/ movi a1, 1 movi a0, 0 wsr a1, windowstart wsr a0, windowbase rsync movi a1, LOCKLEVEL wsr a1, ps rsync j _startup ENDPROC(cpu_restart) #endif /* CONFIG_HOTPLUG_CPU */ /* * DATA section */ __REFDATA .align 4 ENTRY(start_info) .long init_thread_union + KERNEL_STACK_SIZE /* * BSS section */ __PAGE_ALIGNED_BSS #ifdef CONFIG_MMU ENTRY(swapper_pg_dir) .fill PAGE_SIZE, 1, 0 END(swapper_pg_dir) #endif ENTRY(empty_zero_page) .fill PAGE_SIZE, 1, 0 END(empty_zero_page)
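The unpack loop in _startup walks a linker-generated table of 12-byte entries (destination start, destination end, source) and copies one 32-bit word at a time, skipping empty entries and entries whose source and destination coincide. A C rendering under those assumptions; the symbol names are the ones the linker script defines:
#include <stdint.h>
struct boot_reloc {
	uint32_t *dst;		/* start destination (in RAM) */
	uint32_t *dst_end;	/* end destination (in RAM) */
	uint32_t *src;		/* start source (in ROM) */
};
extern struct boot_reloc __boot_reloc_table_start[];
extern struct boot_reloc __boot_reloc_table_end[];
static void unpack_sections(void)
{
	struct boot_reloc *r;
	for (r = __boot_reloc_table_start; r != __boot_reloc_table_end; r++) {
		if (r->dst == r->dst_end || r->dst == r->src)
			continue;	/* empty entry, or already in place */
		for (uint32_t *d = r->dst, *s = r->src; d < r->dst_end; )
			*d++ = *s++;	/* word copy, like the l32i/s32i pair */
	}
}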
aixcc-public/challenge-001-exemplar-source
22,407
arch/xtensa/kernel/vectors.S
/* * arch/xtensa/kernel/vectors.S * * This file contains all exception vectors (user, kernel, and double), * as well as the window vectors (overflow and underflow), and the debug * vector. These are the primary vectors executed by the processor if an * exception occurs. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Copyright (C) 2005 - 2008 Tensilica, Inc. * * Chris Zankel <chris@zankel.net> * */ /* * We use a two-level table approach. The user and kernel exception vectors * use a first-level dispatch table to dispatch the exception to a registered * fast handler or the default handler, if no fast handler was registered. * The default handler sets up a C-stack and dispatches the exception to a * registered C handler in the second-level dispatch table. * * Fast handler entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original value in depc * a3: dispatch table * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: a3 * * The value for PT_DEPC saved to stack also functions as a boolean to * indicate that the exception is either a double or a regular exception: * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * * Note: Neither the kernel nor the user exception handler generates literals. * */ #include <linux/linkage.h> #include <linux/pgtable.h> #include <asm/asmmacro.h> #include <asm/ptrace.h> #include <asm/current.h> #include <asm/asm-offsets.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/thread_info.h> #include <asm/vectors.h> #define WINDOW_VECTORS_SIZE 0x180 /* * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0) * * We get here when an exception occurred while we were in userland. * We switch to the kernel stack and jump to the first level handler * associated to the exception cause. * * Note: the saved kernel stack pointer (EXC_TABLE_KSTK) is already * decremented by PT_USER_SIZE. */ .section .UserExceptionVector.text, "ax" ENTRY(_UserExceptionVector) xsr a3, excsave1 # save a3 and get dispatch table wsr a2, depc # save a2 l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2 s32i a0, a2, PT_AREG0 # save a0 to ESF rsr a0, exccause # retrieve exception cause s32i a0, a2, PT_DEPC # mark it as a regular exception addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_USER # load handler xsr a3, excsave1 # restore a3 and dispatch table jx a0 ENDPROC(_UserExceptionVector) /* * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0) * * We get this exception when we were already in kernel space. * We decrement the current stack pointer (kernel) by PT_KERNEL_SIZE and * jump to the first-level handler associated with the exception cause. * * Note: we need to preserve space for the spill region. 
*/ .section .KernelExceptionVector.text, "ax" ENTRY(_KernelExceptionVector) xsr a3, excsave1 # save a3, and get dispatch table wsr a2, depc # save a2 addi a2, a1, -16 - PT_KERNEL_SIZE # adjust stack pointer s32i a0, a2, PT_AREG0 # save a0 to ESF rsr a0, exccause # retrieve exception cause s32i a0, a2, PT_DEPC # mark it as a regular exception addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address xsr a3, excsave1 # restore a3 and dispatch table jx a0 ENDPROC(_KernelExceptionVector) /* * Double exception vector (Exceptions with PS.EXCM == 1) * We get this exception when another exception occurs while we are * already in an exception, such as a window overflow/underflow exception, * or 'expected' exceptions, for example a memory exception when we were trying * to read data from an invalid address in user space. * * Note that this vector is never invoked for level-1 interrupts, because such * interrupts are disabled (masked) when PS.EXCM is set. * * We decode the exception and take the appropriate action. However, the * double exception vector is much more careful, because a lot more error * cases go through the double exception vector than through the user and * kernel exception vectors. * * Occasionally, the kernel expects a double exception to occur. This usually * happens when accessing user-space memory with the user's permissions * (l32e/s32e instructions). The kernel state, though, is not always suitable * for immediate transfer of control to handle_double, where "normal" exception * processing occurs. Also in kernel mode, TLB misses can occur if accessing * vmalloc memory, possibly requiring repair in a double exception handler. * * The variable at TABLE_FIXUP offset from the pointer in EXCSAVE_1 doubles as * a boolean variable and a pointer to a fixup routine. If the variable * EXC_TABLE_FIXUP is non-zero, this handler jumps to that address. A value of * zero indicates to use the default kernel/user exception handler. * There is only one exception: when the value is identical to the exc_table * label, the kernel is in trouble. This mechanism is used to protect critical * sections, mainly when the handler writes to the stack to assert the stack * pointer is valid. Once the fixup/default handler leaves that area, the * EXC_TABLE_FIXUP variable is reset to the fixup handler or zero. * * Procedures wishing to use this mechanism should set EXC_TABLE_FIXUP to the * nonzero address of a fixup routine before it could cause a double exception * and reset it before it returns. * * Some other things to take care of when a fast exception handler doesn't * specify a particular fixup handler but wants to use the default handlers: * * - The original stack pointer (in a1) must not be modified. The fast * exception handler should only use a2 as the stack pointer. * * - If the fast handler manipulates the stack pointer (in a2), it has to * register a valid fixup handler and cannot use the default handlers. * * - The handler can use any other generic register from a3 to a15, but it * must save the content of these registers to stack (PT_AREG3...PT_AREGx) * * - These registers must be saved before a double exception can occur. * * - If we ever implement handling signals while in double exceptions, the * number of registers a fast handler has saved (excluding a0 and a1) must * be written to PT_AREG1. (1 if only a3 is used, 2 for a3 and a4, etc. 
) * * The fixup handlers are special handlers: * * - Fixup entry conditions differ from regular exceptions: * * a0: DEPC * a1: a1 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE * a3: exctable * depc: a0 * excsave_1: a3 * * - When the kernel enters the fixup handler, it still assumes it is in a * critical section, so EXC_TABLE_FIXUP variable is set to exc_table. * The fixup handler, therefore, has to re-register itself as the fixup * handler before it returns from the double exception. * * - Fixup handler can share the same exception frame with the fast handler. * The kernel stack pointer is not changed when entering the fixup handler. * * - Fixup handlers can jump to the default kernel and user exception * handlers. Before it jumps, though, it has to set up an exception frame * on stack. Because the default handler resets the register fixup handler, * the fixup handler must make sure that the default handler returns to * it instead of the exception address, so it can re-register itself as * the fixup handler. * * In case of a critical condition where the kernel cannot recover, we jump * to unrecoverable_exception with the following entry conditions. * All registers a0...a15 are unchanged from the last exception, except: * * a0: last address before we jumped to the unrecoverable_exception. * excsave_1: a0 * * * See the handle_alloca_user and spill_registers routines for example clients. * * FIXME: Note: we currently don't allow signal handling coming from a double * exception, so the item marked with (*) is not required. */ .section .DoubleExceptionVector.text, "ax" ENTRY(_DoubleExceptionVector) xsr a3, excsave1 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE /* Check for kernel double exception (usually fatal). */ rsr a2, ps _bbsi.l a2, PS_UM_BIT, 1f j .Lksp .align 4 .literal_position 1: /* Check if we are currently handling a window exception. */ /* Note: We don't need to indicate that we enter a critical section. */ xsr a0, depc # get DEPC, save a0 #ifdef SUPPORT_WINDOWED movi a2, WINDOW_VECTORS_VADDR _bltu a0, a2, .Lfixup addi a2, a2, WINDOW_VECTORS_SIZE _bgeu a0, a2, .Lfixup /* Window overflow/underflow exception. Get stack pointer. */ l32i a2, a3, EXC_TABLE_KSTK /* Check for overflow/underflow exception, jump if overflow. */ bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow /* * Restart window underflow exception. * Currently: * depc = orig a0, * a0 = orig DEPC, * a2 = new sp based on KSTK from exc_table * a3 = excsave_1 * excsave_1 = orig a3 * * We return to the instruction in user space that caused the window * underflow exception. Therefore, we change window base to the value * before we entered the window underflow exception and prepare the * registers to return as if we were coming from a regular exception * by changing depc (in a0). * Note: We can trash the current window frame (a0...a3) and depc! */ _DoubleExceptionVector_WindowUnderflow: xsr a3, excsave1 wsr a2, depc # save stack pointer temporarily rsr a0, ps extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH wsr a0, windowbase rsync /* We are now in the previous window frame. Save registers again. */ xsr a2, depc # save a2 and get stack pointer s32i a0, a2, PT_AREG0 xsr a3, excsave1 rsr a0, exccause s32i a0, a2, PT_DEPC # mark it as a regular exception addx4 a0, a0, a3 xsr a3, excsave1 l32i a0, a0, EXC_TABLE_FAST_USER jx a0 #else j .Lfixup #endif /* * We only allow the ITLB miss exception if we are in kernel space. * All other exceptions are unexpected and thus unrecoverable! 
*/ #ifdef CONFIG_MMU .extern fast_second_level_miss_double_kernel .Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ rsr a3, exccause beqi a3, EXCCAUSE_ITLB_MISS, 1f addi a3, a3, -EXCCAUSE_DTLB_MISS bnez a3, .Lunrecoverable 1: movi a3, fast_second_level_miss_double_kernel jx a3 #else .equ .Lksp, .Lunrecoverable #endif /* Critical! We can't handle this situation. PANIC! */ .extern unrecoverable_exception .Lunrecoverable_fixup: l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a0, depc .Lunrecoverable: rsr a3, excsave1 wsr a0, excsave1 call0 unrecoverable_exception .Lfixup:/* Check for a fixup handler or if we were in a critical section. */ /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave1: a3 */ /* Enter critical section. */ l32i a2, a3, EXC_TABLE_FIXUP s32i a3, a3, EXC_TABLE_FIXUP beq a2, a3, .Lunrecoverable_fixup # critical section beqz a2, .Ldflt # no handler was registered /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */ jx a2 .Ldflt: /* Get stack pointer. */ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE addi a2, a2, -PT_USER_SIZE /* a0: depc, a1: a1, a2: kstk, a3: exctable, depc: a0, excsave: a3 */ s32i a0, a2, PT_DEPC l32i a0, a3, EXC_TABLE_DOUBLE_SAVE xsr a0, depc s32i a0, a2, PT_AREG0 /* a0: avail, a1: a1, a2: kstk, a3: exctable, depc: a2, excsave: a3 */ rsr a0, exccause addx4 a0, a0, a3 xsr a3, excsave1 l32i a0, a0, EXC_TABLE_FAST_USER jx a0 #ifdef SUPPORT_WINDOWED /* * Restart window OVERFLOW exception. * Currently: * depc = orig a0, * a0 = orig DEPC, * a2 = new sp based on KSTK from exc_table * a3 = EXCSAVE_1 * excsave_1 = orig a3 * * We return to the instruction in user space that caused the window * overflow exception. Therefore, we change window base to the value * before we entered the window overflow exception and prepare the * registers to return as if we were coming from a regular exception * by changing DEPC (in a0). * * NOTE: We CANNOT trash the current window frame (a0...a3), but we * can clobber depc. * * The tricky part here is that overflow8 and overflow12 handlers * save a0, then clobber a0. To restart the handler, we have to restore * a0 if the double exception was past the point where a0 was clobbered. * * To keep things simple, we take advantage of the fact all overflow * handlers save a0 in their very first instruction. If DEPC was past * that instruction, we can safely restore a0 from where it was saved * on the stack. * * a0: depc, a1: a1, a2: kstk, a3: exc_table, depc: a0, excsave1: a3 */ _DoubleExceptionVector_WindowOverflow: extui a2, a0, 0, 6 # get offset into 64-byte vector handler beqz a2, 1f # if at start of vector, don't restore addi a0, a0, -128 bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12 /* * This fixup handler is for the extremely unlikely case where the * overflow handler's reference thru a0 gets a hardware TLB refill * that bumps out the (distinct, aliasing) TLB entry that mapped its * prior references thru a9/a13, and where our reference now thru * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill). */ movi a2, window_overflow_restore_a0_fixup s32i a2, a3, EXC_TABLE_FIXUP l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a3, excsave1 bbsi.l a0, 7, 2f /* * Restore a0 as saved by _WindowOverflow8(). */ l32e a0, a9, -16 wsr a0, depc # replace the saved a0 j 3f 2: /* * Restore a0 as saved by _WindowOverflow12(). 
*/ l32e a0, a13, -16 wsr a0, depc # replace the saved a0 3: xsr a3, excsave1 movi a0, 0 s32i a0, a3, EXC_TABLE_FIXUP s32i a2, a3, EXC_TABLE_DOUBLE_SAVE 1: /* * Restore WindowBase while leaving all address registers restored. * We have to use ROTW for this, because WSR.WINDOWBASE requires * an address register (which would prevent restore). * * Window Base goes from 0 ... 7 (Modulo 8) * Window Start is 8 bits; Ex: (0b1010 1010):0x55 from series of call4s */ rsr a0, ps extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH rsr a2, windowbase sub a0, a2, a0 extui a0, a0, 0, 3 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a3, excsave1 beqi a0, 1, .L1pane beqi a0, 3, .L3pane rsr a0, depc rotw -2 /* * We are now in the user code's original window frame. * Process the exception as a user exception as if it was * taken by the user code. * * This is similar to the user exception vector, * except that PT_DEPC isn't set to EXCCAUSE. */ 1: xsr a3, excsave1 wsr a2, depc l32i a2, a3, EXC_TABLE_KSTK s32i a0, a2, PT_AREG0 rsr a0, exccause s32i a0, a2, PT_DEPC _DoubleExceptionVector_handle_exception: addi a0, a0, -EXCCAUSE_UNALIGNED beqz a0, 2f addx4 a0, a0, a3 l32i a0, a0, EXC_TABLE_FAST_USER + 4 * EXCCAUSE_UNALIGNED xsr a3, excsave1 jx a0 2: movi a0, user_exception xsr a3, excsave1 jx a0 .L1pane: rsr a0, depc rotw -1 j 1b .L3pane: rsr a0, depc rotw -3 j 1b #endif ENDPROC(_DoubleExceptionVector) #ifdef SUPPORT_WINDOWED /* * Fixup handler for TLB miss in double exception handler for window overflow. * We get here with windowbase set to the window that was being spilled and * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12 * (bit set) window. * * We do the following here: * - go to the original window retaining a0 value; * - set up exception stack to return back to appropriate a0 restore code * (we'll need to rotate window back and there's no place to save this * information, use different return address for that); * - handle the exception; * - go to the window that was being spilled; * - set up window_overflow_restore_a0_fixup as a fixup routine; * - reload a0; * - restore the original window; * - reset the default fixup routine; * - return to user. By the time we get to this fixup handler all information * about the conditions of the original double exception that happened in * the window overflow handler is lost, so we just return to userspace to * retry overflow from start. * * a0: value of depc, original value in depc * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE * a3: exctable, original value in excsave1 */ __XTENSA_HANDLER .literal_position ENTRY(window_overflow_restore_a0_fixup) rsr a0, ps extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH rsr a2, windowbase sub a0, a2, a0 extui a0, a0, 0, 3 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a3, excsave1 _beqi a0, 1, .Lhandle_1 _beqi a0, 3, .Lhandle_3 .macro overflow_fixup_handle_exception_pane n rsr a0, depc rotw -\n xsr a3, excsave1 wsr a2, depc l32i a2, a3, EXC_TABLE_KSTK s32i a0, a2, PT_AREG0 movi a0, .Lrestore_\n s32i a0, a2, PT_DEPC rsr a0, exccause j _DoubleExceptionVector_handle_exception .endm overflow_fixup_handle_exception_pane 2 .Lhandle_1: overflow_fixup_handle_exception_pane 1 .Lhandle_3: overflow_fixup_handle_exception_pane 3 .macro overflow_fixup_restore_a0_pane n rotw \n /* Need to preserve a0 value here to be able to handle exception * that may occur on a0 reload from stack. It may occur because * TLB miss handler may not be atomic and pointer to page table * may be lost before we get here. 
There are no free registers, * so we need to use EXC_TABLE_DOUBLE_SAVE area. */ xsr a3, excsave1 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE movi a2, window_overflow_restore_a0_fixup s32i a2, a3, EXC_TABLE_FIXUP l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a3, excsave1 bbsi.l a0, 7, 1f l32e a0, a9, -16 j 2f 1: l32e a0, a13, -16 2: rotw -\n .endm .Lrestore_2: overflow_fixup_restore_a0_pane 2 .Lset_default_fixup: xsr a3, excsave1 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE movi a2, 0 s32i a2, a3, EXC_TABLE_FIXUP l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a3, excsave1 rfe .Lrestore_1: overflow_fixup_restore_a0_pane 1 j .Lset_default_fixup .Lrestore_3: overflow_fixup_restore_a0_pane 3 j .Lset_default_fixup ENDPROC(window_overflow_restore_a0_fixup) #endif /* * Debug interrupt vector * * There is not much space here, so simply jump to another handler. * EXCSAVE[DEBUGLEVEL] has been set to that handler. */ .section .DebugInterruptVector.text, "ax" ENTRY(_DebugInterruptVector) xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL s32i a0, a3, DT_DEBUG_SAVE l32i a0, a3, DT_DEBUG_EXCEPTION jx a0 ENDPROC(_DebugInterruptVector) /* * Medium priority level interrupt vectors * * Each takes less than 16 (0x10) bytes, no literals, by placing * the extra 8 bytes that would otherwise be required in the window * vectors area where there is space. With relocatable vectors, * all vectors are within ~ 4 kB range of each other, so we can * simply jump (J) to another vector without having to use JX. * * common_exception code gets current IRQ level in PS.INTLEVEL * and preserves it for the IRQ handling time. */ .macro irq_entry_level level .if XCHAL_EXCM_LEVEL >= \level .section .Level\level\()InterruptVector.text, "ax" ENTRY(_Level\level\()InterruptVector) wsr a0, excsave2 rsr a0, epc\level wsr a0, epc1 .if \level <= LOCKLEVEL movi a0, EXCCAUSE_LEVEL1_INTERRUPT .else movi a0, EXCCAUSE_MAPPED_NMI .endif wsr a0, exccause rsr a0, eps\level # branch to user or kernel vector j _SimulateUserKernelVectorException .endif .endm irq_entry_level 2 irq_entry_level 3 irq_entry_level 4 irq_entry_level 5 irq_entry_level 6 #if XCHAL_EXCM_LEVEL >= 2 /* * Continuation of medium priority interrupt dispatch code. * On entry here, a0 contains PS, and EPC2 contains saved a0: */ __XTENSA_HANDLER .align 4 _SimulateUserKernelVectorException: addi a0, a0, (1 << PS_EXCM_BIT) #if !XTENSA_FAKE_NMI wsr a0, ps #endif bbsi.l a0, PS_UM_BIT, 1f # branch if user mode xsr a0, excsave2 # restore a0 j _KernelExceptionVector # simulate kernel vector exception 1: xsr a0, excsave2 # restore a0 j _UserExceptionVector # simulate user vector exception #endif /* Window overflow and underflow handlers. * The handlers must be 64 bytes apart, first starting with the underflow * handlers underflow-4 to underflow-12, then the overflow handlers * overflow-4 to overflow-12. * * Note: We rerun the underflow handlers if we hit an exception, so * we try to access any page that would cause a page fault early. 
*/ #define ENTRY_ALIGN64(name) \ .globl name; \ .align 64; \ name: .section .WindowVectors.text, "ax" #ifdef SUPPORT_WINDOWED /* 4-Register Window Overflow Vector (Handler) */ ENTRY_ALIGN64(_WindowOverflow4) s32e a0, a5, -16 s32e a1, a5, -12 s32e a2, a5, -8 s32e a3, a5, -4 rfwo ENDPROC(_WindowOverflow4) /* 4-Register Window Underflow Vector (Handler) */ ENTRY_ALIGN64(_WindowUnderflow4) l32e a0, a5, -16 l32e a1, a5, -12 l32e a2, a5, -8 l32e a3, a5, -4 rfwu ENDPROC(_WindowUnderflow4) /* 8-Register Window Overflow Vector (Handler) */ ENTRY_ALIGN64(_WindowOverflow8) s32e a0, a9, -16 l32e a0, a1, -12 s32e a2, a9, -8 s32e a1, a9, -12 s32e a3, a9, -4 s32e a4, a0, -32 s32e a5, a0, -28 s32e a6, a0, -24 s32e a7, a0, -20 rfwo ENDPROC(_WindowOverflow8) /* 8-Register Window Underflow Vector (Handler) */ ENTRY_ALIGN64(_WindowUnderflow8) l32e a1, a9, -12 l32e a0, a9, -16 l32e a7, a1, -12 l32e a2, a9, -8 l32e a4, a7, -32 l32e a3, a9, -4 l32e a5, a7, -28 l32e a6, a7, -24 l32e a7, a7, -20 rfwu ENDPROC(_WindowUnderflow8) /* 12-Register Window Overflow Vector (Handler) */ ENTRY_ALIGN64(_WindowOverflow12) s32e a0, a13, -16 l32e a0, a1, -12 s32e a1, a13, -12 s32e a2, a13, -8 s32e a3, a13, -4 s32e a4, a0, -48 s32e a5, a0, -44 s32e a6, a0, -40 s32e a7, a0, -36 s32e a8, a0, -32 s32e a9, a0, -28 s32e a10, a0, -24 s32e a11, a0, -20 rfwo ENDPROC(_WindowOverflow12) /* 12-Register Window Underflow Vector (Handler) */ ENTRY_ALIGN64(_WindowUnderflow12) l32e a1, a13, -12 l32e a0, a13, -16 l32e a11, a1, -12 l32e a2, a13, -8 l32e a4, a11, -48 l32e a8, a11, -32 l32e a3, a13, -4 l32e a5, a11, -44 l32e a6, a11, -40 l32e a7, a11, -36 l32e a9, a11, -28 l32e a10, a11, -24 l32e a11, a11, -20 rfwu ENDPROC(_WindowUnderflow12) #endif .text
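Both _DoubleExceptionVector_WindowOverflow and window_overflow_restore_a0_fixup above compute how far to rotate with the same arithmetic: subtract the old window base (PS.OWB) from the current WINDOWBASE and keep the low three bits, since the window base wraps modulo 8 on this configuration. A small C sketch of that computation, assuming an eight-pane register window:
/* Pane distance as computed by the rsr/sub/extui sequence above. */
static unsigned panes_to_rotate(unsigned windowbase, unsigned ps_owb)
{
	return (windowbase - ps_owb) & 0x7;	/* extui a0, a0, 0, 3 */
}
/* Example: windowbase = 2, PS.OWB = 7 -> (2 - 7) & 7 = 3, which selects
 * the ".L3pane" (rotw -3) case; a distance of 2 is the fall-through. */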
aixcc-public/challenge-001-exemplar-source
54,109
arch/xtensa/kernel/entry.S
/* * Low-level exception handling * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004 - 2008 by Tensilica Inc. * Copyright (C) 2015 Cadence Design Systems Inc. * * Chris Zankel <chris@zankel.net> * */ #include <linux/linkage.h> #include <linux/pgtable.h> #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/coprocessor.h> #include <asm/thread_info.h> #include <asm/asm-uaccess.h> #include <asm/unistd.h> #include <asm/ptrace.h> #include <asm/current.h> #include <asm/page.h> #include <asm/signal.h> #include <asm/tlbflush.h> #include <variant/tie-asm.h> /* * Macro to find first bit set in WINDOWBASE from the left + 1 * * 100....0 -> 1 * 010....0 -> 2 * 000....1 -> WSBITS */ .macro ffs_ws bit mask #if XCHAL_HAVE_NSA nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0) addi \bit, \bit, WSBITS - 32 + 1 # uppermost bit set -> return 1 #else movi \bit, WSBITS #if WSBITS > 16 _bltui \mask, 0x10000, 99f addi \bit, \bit, -16 extui \mask, \mask, 16, 16 #endif #if WSBITS > 8 99: _bltui \mask, 0x100, 99f addi \bit, \bit, -8 srli \mask, \mask, 8 #endif 99: _bltui \mask, 0x10, 99f addi \bit, \bit, -4 srli \mask, \mask, 4 99: _bltui \mask, 0x4, 99f addi \bit, \bit, -2 srli \mask, \mask, 2 99: _bltui \mask, 0x2, 99f addi \bit, \bit, -1 99: #endif .endm .macro irq_save flags tmp #if XTENSA_FAKE_NMI #if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL rsr \flags, ps extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH bgei \tmp, LOCKLEVEL, 99f rsil \tmp, LOCKLEVEL 99: #else movi \tmp, LOCKLEVEL rsr \flags, ps or \flags, \flags, \tmp xsr \flags, ps rsync #endif #else rsil \flags, LOCKLEVEL #endif .endm /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */ /* * First-level exception handler for user exceptions. * Save some special registers, extra states and all registers in the AR * register file that were in use in the user task, and jump to the common * exception code. * We save SAR (used to calculate WMASK), and WB and WS (we don't have to * save them for kernel exceptions). * * Entry condition for user_exception: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original value in depc * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * * Entry condition for _user_exception: * * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC * excsave has been restored, and * stack pointer (a1) has been set. * * Note: _user_exception might be at an odd address. Don't use call0..call12 */ .literal_position ENTRY(user_exception) /* Save a1, a2, a3, and set SP. */ rsr a0, depc s32i a1, a2, PT_AREG1 s32i a0, a2, PT_AREG2 s32i a3, a2, PT_AREG3 mov a1, a2 .globl _user_exception _user_exception: /* Save SAR and turn off single stepping */ movi a2, 0 wsr a2, depc # terminate user stack trace with 0 rsr a3, sar xsr a2, icountlevel s32i a3, a1, PT_SAR s32i a2, a1, PT_ICOUNTLEVEL #if XCHAL_HAVE_THREADPTR rur a2, threadptr s32i a2, a1, PT_THREADPTR #endif /* Rotate ws so that the current windowbase is at bit0. */ /* Assume ws = xxwww1yyyy. 
Rotate ws right, so that a2 = yyyyxxwww1 */ #if defined(USER_SUPPORT_WINDOWED) rsr a2, windowbase rsr a3, windowstart ssr a2 s32i a2, a1, PT_WINDOWBASE s32i a3, a1, PT_WINDOWSTART slli a2, a3, 32-WSBITS src a2, a3, a2 srli a2, a2, 32-WSBITS s32i a2, a1, PT_WMASK # needed for restoring registers #else movi a2, 0 movi a3, 1 s32i a2, a1, PT_WINDOWBASE s32i a3, a1, PT_WINDOWSTART s32i a3, a1, PT_WMASK #endif /* Save only live registers. */ UABI_W _bbsi.l a2, 1, .Lsave_window_registers s32i a4, a1, PT_AREG4 s32i a5, a1, PT_AREG5 s32i a6, a1, PT_AREG6 s32i a7, a1, PT_AREG7 UABI_W _bbsi.l a2, 2, .Lsave_window_registers s32i a8, a1, PT_AREG8 s32i a9, a1, PT_AREG9 s32i a10, a1, PT_AREG10 s32i a11, a1, PT_AREG11 UABI_W _bbsi.l a2, 3, .Lsave_window_registers s32i a12, a1, PT_AREG12 s32i a13, a1, PT_AREG13 s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 #if defined(USER_SUPPORT_WINDOWED) /* If only one valid frame, skip saving regs. */ beqi a2, 1, common_exception /* Save the remaining registers. * We have to save all registers up to the first '1' from * the right, except the current frame (bit 0). * Assume a2 is: 001001000110001 * All register frames starting from the top field to the marked '1' * must be saved. */ .Lsave_window_registers: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0 neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1 and a3, a3, a2 # max. only one bit is set /* Find number of frames to save */ ffs_ws a0, a3 # number of frames to the '1' from left /* Store information into WMASK: * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart, * bits 4...: number of valid 4-register frames */ slli a3, a0, 4 # number of frames to save in bits 8..4 extui a2, a2, 0, 4 # mask for the first 16 registers or a2, a3, a2 s32i a2, a1, PT_WMASK # needed when we restore the reg-file /* Save 4 registers at a time */ 1: rotw -1 s32i a0, a5, PT_AREG_END - 16 s32i a1, a5, PT_AREG_END - 12 s32i a2, a5, PT_AREG_END - 8 s32i a3, a5, PT_AREG_END - 4 addi a0, a4, -1 addi a1, a5, -16 _bnez a0, 1b /* WINDOWBASE still in SAR! */ rsr a2, sar # original WINDOWBASE movi a3, 1 ssl a2 sll a3, a3 wsr a3, windowstart # set corresponding WINDOWSTART bit wsr a2, windowbase # and WINDOWBASE rsync /* We are back to the original stack pointer (a1) */ #endif /* Now, jump to the common exception handler. */ j common_exception ENDPROC(user_exception) /* * First-level exception handler for kernel exceptions * Save special registers and the live window frame. * Note: Even though we change the stack pointer, we don't have to do a * MOVSP here, as we do that when we return from the exception. * (See comment in the kernel exception exit code) * * Entry condition for kernel_exception: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * * Entry condition for _kernel_exception: * * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC * excsave has been restored, and * stack pointer (a1) has been set. * * Note: _kernel_exception might be at an odd address. Don't use call0..call12 */ ENTRY(kernel_exception) /* Save a1, a2, a3, and set SP. 
*/ rsr a0, depc # get a2 s32i a1, a2, PT_AREG1 s32i a0, a2, PT_AREG2 s32i a3, a2, PT_AREG3 mov a1, a2 .globl _kernel_exception _kernel_exception: /* Save SAR and turn off single stepping */ movi a2, 0 rsr a3, sar xsr a2, icountlevel s32i a3, a1, PT_SAR s32i a2, a1, PT_ICOUNTLEVEL #if defined(__XTENSA_WINDOWED_ABI__) /* Rotate ws so that the current windowbase is at bit0. */ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ rsr a2, windowbase # don't need to save these, we only rsr a3, windowstart # need shifted windowstart: windowmask ssr a2 slli a2, a3, 32-WSBITS src a2, a3, a2 srli a2, a2, 32-WSBITS s32i a2, a1, PT_WMASK # needed for kernel_exception_exit #endif /* Save only the live window-frame */ KABI_W _bbsi.l a2, 1, 1f s32i a4, a1, PT_AREG4 s32i a5, a1, PT_AREG5 s32i a6, a1, PT_AREG6 s32i a7, a1, PT_AREG7 KABI_W _bbsi.l a2, 2, 1f s32i a8, a1, PT_AREG8 s32i a9, a1, PT_AREG9 s32i a10, a1, PT_AREG10 s32i a11, a1, PT_AREG11 KABI_W _bbsi.l a2, 3, 1f s32i a12, a1, PT_AREG12 s32i a13, a1, PT_AREG13 s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 #ifdef __XTENSA_WINDOWED_ABI__ _bnei a2, 1, 1f /* Copy spill slots of a0 and a1 to imitate movsp * in order to keep exception stack continuous */ l32i a3, a1, PT_KERNEL_SIZE l32i a0, a1, PT_KERNEL_SIZE + 4 s32e a3, a1, -16 s32e a0, a1, -12 #endif 1: l32i a0, a1, PT_AREG0 # restore saved a0 wsr a0, depc /* * This is the common exception handler. * We get here from the user exception handler or simply by falling through * from the kernel exception handler. * Save the remaining special registers, switch to kernel mode, and jump * to the second-level exception handler. * */ common_exception: /* Save some registers, disable loops and clear the syscall flag. */ rsr a2, debugcause rsr a3, epc1 s32i a2, a1, PT_DEBUGCAUSE s32i a3, a1, PT_PC movi a2, NO_SYSCALL rsr a3, excvaddr s32i a2, a1, PT_SYSCALL movi a2, 0 s32i a3, a1, PT_EXCVADDR #if XCHAL_HAVE_LOOPS xsr a2, lcount s32i a2, a1, PT_LCOUNT #endif #if XCHAL_HAVE_EXCLUSIVE /* Clear exclusive access monitor set by interrupted code */ clrex #endif /* It is now safe to restore the EXC_TABLE_FIXUP variable. */ rsr a2, exccause movi a3, 0 rsr a0, excsave1 s32i a2, a1, PT_EXCCAUSE s32i a3, a0, EXC_TABLE_FIXUP /* All unrecoverable states are saved on stack, now, and a1 is valid. * Now we can allow exceptions again. In case we've got an interrupt * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts, * otherwise it's left unchanged. * * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) */ rsr a3, ps s32i a3, a1, PT_PS # save ps #if XTENSA_FAKE_NMI /* Correct PS needs to be saved in the PT_PS: * - in case of exception or level-1 interrupt it's in the PS, * and is already saved. * - in case of medium level interrupt it's in the excsave2. 
*/ movi a0, EXCCAUSE_MAPPED_NMI extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH beq a2, a0, .Lmedium_level_irq bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0 .Lmedium_level_irq: rsr a0, excsave2 s32i a0, a1, PT_PS # save medium-level interrupt ps bgei a3, LOCKLEVEL, .Lexception .Llevel1_irq: movi a3, LOCKLEVEL .Lexception: KABI_W movi a0, PS_WOE_MASK KABI_W or a3, a3, a0 #else addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT movi a0, LOCKLEVEL extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH # a3 = PS.INTLEVEL moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt KABI_W movi a2, PS_WOE_MASK KABI_W or a3, a3, a2 #endif /* restore return address (or 0 if return to userspace) */ rsr a0, depc wsr a3, ps rsync # PS.WOE => rsync => overflow /* Save lbeg, lend */ #if XCHAL_HAVE_LOOPS rsr a4, lbeg rsr a3, lend s32i a4, a1, PT_LBEG s32i a3, a1, PT_LEND #endif /* Save SCOMPARE1 */ #if XCHAL_HAVE_S32C1I rsr a3, scompare1 s32i a3, a1, PT_SCOMPARE1 #endif /* Save optional registers. */ save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT #ifdef CONFIG_TRACE_IRQFLAGS rsr abi_tmp0, ps extui abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH beqz abi_tmp0, 1f abi_call trace_hardirqs_off 1: #endif #ifdef CONFIG_CONTEXT_TRACKING_USER l32i abi_tmp0, a1, PT_PS bbci.l abi_tmp0, PS_UM_BIT, 1f abi_call user_exit_callable 1: #endif /* Go to second-level dispatcher. Set up parameters to pass to the * exception handler and call the exception handler. */ l32i abi_arg1, a1, PT_EXCCAUSE # pass EXCCAUSE rsr abi_tmp0, excsave1 addx4 abi_tmp0, abi_arg1, abi_tmp0 l32i abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT # load handler mov abi_arg0, a1 # pass stack frame /* Call the second-level handler */ abi_callx abi_tmp0 /* Jump here for exception exit */ .global common_exception_return common_exception_return: #if XTENSA_FAKE_NMI l32i abi_tmp0, a1, PT_EXCCAUSE movi abi_tmp1, EXCCAUSE_MAPPED_NMI l32i abi_saved1, a1, PT_PS beq abi_tmp0, abi_tmp1, .Lrestore_state #endif .Ltif_loop: irq_save abi_tmp0, abi_tmp1 #ifdef CONFIG_TRACE_IRQFLAGS abi_call trace_hardirqs_off #endif /* Jump if we are returning from kernel exceptions. */ l32i abi_saved1, a1, PT_PS GET_THREAD_INFO(abi_tmp0, a1) l32i abi_saved0, abi_tmp0, TI_FLAGS _bbci.l abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel /* Specific to a user exception exit: * We need to check some flags for signal handling and rescheduling, * and have to restore WB and WS, extra states, and all registers * in the register file that were in use in the user task. * Note that we don't disable interrupts here. 
*/ _bbsi.l abi_saved0, TIF_NEED_RESCHED, .Lresched movi abi_tmp0, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL bnone abi_saved0, abi_tmp0, .Lexit_tif_loop_user l32i abi_tmp0, a1, PT_DEPC bgeui abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state /* Call do_signal() */ #ifdef CONFIG_TRACE_IRQFLAGS abi_call trace_hardirqs_on #endif rsil abi_tmp0, 0 mov abi_arg0, a1 abi_call do_notify_resume # int do_notify_resume(struct pt_regs*) j .Ltif_loop .Lresched: #ifdef CONFIG_TRACE_IRQFLAGS abi_call trace_hardirqs_on #endif rsil abi_tmp0, 0 abi_call schedule # void schedule (void) j .Ltif_loop .Lexit_tif_loop_kernel: #ifdef CONFIG_PREEMPTION _bbci.l abi_saved0, TIF_NEED_RESCHED, .Lrestore_state /* Check current_thread_info->preempt_count */ l32i abi_tmp1, abi_tmp0, TI_PRE_COUNT bnez abi_tmp1, .Lrestore_state abi_call preempt_schedule_irq #endif j .Lrestore_state .Lexit_tif_loop_user: #ifdef CONFIG_CONTEXT_TRACKING_USER abi_call user_enter_callable #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT _bbci.l abi_saved0, TIF_DB_DISABLED, 1f abi_call restore_dbreak 1: #endif #ifdef CONFIG_DEBUG_TLB_SANITY l32i abi_tmp0, a1, PT_DEPC bgeui abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state abi_call check_tlb_sanity #endif .Lrestore_state: #ifdef CONFIG_TRACE_IRQFLAGS extui abi_tmp0, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH bgei abi_tmp0, LOCKLEVEL, 1f abi_call trace_hardirqs_on 1: #endif /* * Restore optional registers. * abi_arg* are used as temporary registers here. */ load_xtregs_opt a1 abi_tmp0 abi_arg0 abi_arg1 abi_arg2 abi_arg3 PT_XTREGS_OPT /* Restore SCOMPARE1 */ #if XCHAL_HAVE_S32C1I l32i abi_tmp0, a1, PT_SCOMPARE1 wsr abi_tmp0, scompare1 #endif wsr abi_saved1, ps /* disable interrupts */ _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit user_exception_exit: /* Restore the state of the task and return from the exception. */ #if defined(USER_SUPPORT_WINDOWED) /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */ l32i a2, a1, PT_WINDOWBASE l32i a3, a1, PT_WINDOWSTART wsr a1, depc # use DEPC as temp storage wsr a3, windowstart # restore WINDOWSTART ssr a2 # preserve user's WB in the SAR wsr a2, windowbase # switch to user's saved WB rsync rsr a1, depc # restore stack pointer l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9) rotw -1 # we restore a4..a7 _bltui a6, 16, .Lclear_regs # only have to restore current window? /* The working registers are a0 and a3. We are restoring to * a4..a7. Be careful not to destroy what we have just restored. * Note: wmask has the format YYYYM: * Y: number of registers saved in groups of 4 * M: 4 bit mask of first 16 registers */ mov a2, a6 mov a3, a5 1: rotw -1 # a0..a3 become a4..a7 addi a3, a7, -4*4 # next iteration addi a2, a6, -16 # decrementing Y in WMASK l32i a4, a3, PT_AREG_END + 0 l32i a5, a3, PT_AREG_END + 4 l32i a6, a3, PT_AREG_END + 8 l32i a7, a3, PT_AREG_END + 12 _bgeui a2, 16, 1b /* Clear unrestored registers (don't leak anything to user-land) */ .Lclear_regs: rsr a0, windowbase rsr a3, sar sub a3, a0, a3 beqz a3, 2f extui a3, a3, 0, WBBITS 1: rotw -1 addi a3, a7, -1 movi a4, 0 movi a5, 0 movi a6, 0 movi a7, 0 bgei a3, 1, 1b /* We are back where we were when we started. * Note: a2 still contains WMASK (if we've returned to the original * frame where we had loaded a2), or at least the lower 4 bits * (if we have restored WSBITS-1 frames). */ 2: #else movi a2, 1 #endif #if XCHAL_HAVE_THREADPTR l32i a3, a1, PT_THREADPTR wur a3, threadptr #endif j common_exception_exit /* This is the kernel exception exit. 
* We avoided doing a MOVSP when we entered the exception, but we * have to do it here. */ kernel_exception_exit: #if defined(__XTENSA_WINDOWED_ABI__) /* Check if we have to do a movsp. * * We only have to do a movsp if the previous window-frame has * been spilled to the *temporary* exception stack instead of the * task's stack. This is the case if the corresponding bit in * WINDOWSTART for the previous window-frame was set before * (not spilled) but is zero now (spilled). * If this bit is zero, all other bits except the one for the * current window frame are also zero. So, we can use a simple test: * 'and' WINDOWSTART and WINDOWSTART-1: * * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]* * * The result is zero only if one bit was set. * * (Note: We might have gone through several task switches before * we come back to the current task, so WINDOWBASE might be * different from the time the exception occurred.) */ /* Test WINDOWSTART before and after the exception. * We actually have WMASK, so we only have to test if it is 1 or not. */ l32i a2, a1, PT_WMASK _beqi a2, 1, common_exception_exit # Spilled before exception, jump /* Test WINDOWSTART now. If spilled, do the movsp */ rsr a3, windowstart addi a0, a3, -1 and a3, a3, a0 _bnez a3, common_exception_exit /* Do a movsp (we returned from a call4, so we have at least a0..a7) */ addi a0, a1, -16 l32i a3, a0, 0 l32i a4, a0, 4 s32i a3, a1, PT_KERNEL_SIZE + 0 s32i a4, a1, PT_KERNEL_SIZE + 4 l32i a3, a0, 8 l32i a4, a0, 12 s32i a3, a1, PT_KERNEL_SIZE + 8 s32i a4, a1, PT_KERNEL_SIZE + 12 /* Common exception exit. * We restore the special registers and the current window frame, and * return from the exception. * * Note: We expect a2 to hold PT_WMASK */ #else movi a2, 1 #endif common_exception_exit: /* Restore address registers. */ _bbsi.l a2, 1, 1f l32i a4, a1, PT_AREG4 l32i a5, a1, PT_AREG5 l32i a6, a1, PT_AREG6 l32i a7, a1, PT_AREG7 _bbsi.l a2, 2, 1f l32i a8, a1, PT_AREG8 l32i a9, a1, PT_AREG9 l32i a10, a1, PT_AREG10 l32i a11, a1, PT_AREG11 _bbsi.l a2, 3, 1f l32i a12, a1, PT_AREG12 l32i a13, a1, PT_AREG13 l32i a14, a1, PT_AREG14 l32i a15, a1, PT_AREG15 /* Restore PC, SAR */ 1: l32i a2, a1, PT_PC l32i a3, a1, PT_SAR wsr a2, epc1 wsr a3, sar /* Restore LBEG, LEND, LCOUNT */ #if XCHAL_HAVE_LOOPS l32i a2, a1, PT_LBEG l32i a3, a1, PT_LEND wsr a2, lbeg l32i a2, a1, PT_LCOUNT wsr a3, lend wsr a2, lcount #endif /* We control single stepping through the ICOUNTLEVEL register. */ l32i a2, a1, PT_ICOUNTLEVEL movi a3, -2 wsr a2, icountlevel wsr a3, icount /* Check if it was a double exception. */ l32i a0, a1, PT_DEPC l32i a3, a1, PT_AREG3 l32i a2, a1, PT_AREG2 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f /* Restore a0...a3 and return */ l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 rfe 1: wsr a0, depc l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 rfde ENDPROC(kernel_exception) /* * Debug exception handler. * * Currently, we don't support KGDB, so only user applications can be debugged. 
* * When we get here, a0 is trashed and saved to excsave[debuglevel] */ .literal_position ENTRY(debug_exception) rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL bbsi.l a0, PS_EXCM_BIT, .Ldebug_exception_in_exception # exception mode /* Set EPC1 and EXCCAUSE */ wsr a2, depc # save a2 temporarily rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL wsr a2, epc1 movi a2, EXCCAUSE_MAPPED_DEBUG wsr a2, exccause /* Restore PS to the value before the debug exception, but with PS.EXCM set. */ movi a2, 1 << PS_EXCM_BIT or a2, a0, a2 wsr a2, ps /* Switch to kernel/user stack, restore jump vector, and save a0 */ bbsi.l a2, PS_UM_BIT, .Ldebug_exception_user # jump if user mode addi a2, a1, -16 - PT_KERNEL_SIZE # assume kernel stack .Ldebug_exception_continue: l32i a0, a3, DT_DEBUG_SAVE s32i a1, a2, PT_AREG1 s32i a0, a2, PT_AREG0 movi a0, 0 s32i a0, a2, PT_DEPC # mark it as a regular exception xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL xsr a0, depc s32i a3, a2, PT_AREG3 s32i a0, a2, PT_AREG2 mov a1, a2 /* A debug exception is handled as an ordinary exception, so interrupts will * likely be enabled in the common exception handler. Disable * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM * meaning. */ #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT) GET_THREAD_INFO(a2, a1) l32i a3, a2, TI_PRE_COUNT addi a3, a3, 1 s32i a3, a2, TI_PRE_COUNT #endif rsr a2, ps bbsi.l a2, PS_UM_BIT, _user_exception j _kernel_exception .Ldebug_exception_user: rsr a2, excsave1 l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer j .Ldebug_exception_continue .Ldebug_exception_in_exception: #ifdef CONFIG_HAVE_HW_BREAKPOINT /* Debug exception while in exception mode. This may happen when the * window overflow/underflow handler or a fast exception handler hits * a data breakpoint, in which case we save and disable all data * breakpoints, single-step the faulting instruction and restore the data * breakpoints. */ bbci.l a0, PS_UM_BIT, .Ldebug_exception_in_exception # jump if kernel mode rsr a0, debugcause bbsi.l a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak .set _index, 0 .rept XCHAL_NUM_DBREAK l32i a0, a3, DT_DBREAKC_SAVE + _index * 4 wsr a0, SREG_DBREAKC + _index .set _index, _index + 1 .endr l32i a0, a3, DT_ICOUNT_LEVEL_SAVE wsr a0, icountlevel l32i a0, a3, DT_ICOUNT_SAVE xsr a0, icount l32i a0, a3, DT_DEBUG_SAVE xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL rfi XCHAL_DEBUGLEVEL .Ldebug_save_dbreak: .set _index, 0 .rept XCHAL_NUM_DBREAK movi a0, 0 xsr a0, SREG_DBREAKC + _index s32i a0, a3, DT_DBREAKC_SAVE + _index * 4 .set _index, _index + 1 .endr movi a0, XCHAL_EXCM_LEVEL + 1 xsr a0, icountlevel s32i a0, a3, DT_ICOUNT_LEVEL_SAVE movi a0, 0xfffffffe xsr a0, icount s32i a0, a3, DT_ICOUNT_SAVE l32i a0, a3, DT_DEBUG_SAVE xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL rfi XCHAL_DEBUGLEVEL #else /* Debug exception while in exception mode. Should not happen. */ j .Ldebug_exception_in_exception // FIXME!! #endif ENDPROC(debug_exception) /* * We get here in case of an unrecoverable exception. * The only thing we can do is to be nice and print a panic message. * We only produce a single stack frame for panic, so ??? * * * Entry conditions: * * - a0 contains the caller address; original value saved in excsave1. * - the original a0 contains a valid return address (backtrace) or 0. * - a2 contains a valid stack pointer * * Notes: * * - If the stack pointer could be invalid, the caller has to set up a * dummy stack pointer (e.g. the stack of the init_task) * * - If the return address could be invalid, the caller has to set it * to 0, so the backtrace would stop. 
* */ .align 4 unrecoverable_text: .ascii "Unrecoverable error in exception handler\0" .literal_position ENTRY(unrecoverable_exception) #if XCHAL_HAVE_WINDOWED movi a0, 1 movi a1, 0 wsr a0, windowstart wsr a1, windowbase rsync #endif movi a1, KERNEL_PS_WOE_MASK | LOCKLEVEL wsr a1, ps rsync movi a1, init_task movi a0, 0 addi a1, a1, PT_REGS_OFFSET movi abi_arg0, unrecoverable_text abi_call panic 1: j 1b ENDPROC(unrecoverable_exception) /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */ __XTENSA_HANDLER .literal_position #ifdef SUPPORT_WINDOWED /* * Fast-handler for alloca exceptions * * The ALLOCA handler is entered when user code executes the MOVSP * instruction and the caller's frame is not in the register file. * * This algorithm was taken from Ross Morley's RTOS Porting Layer: * * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S * * It leverages the existing window spill/fill routines and their support for * double exceptions. The 'movsp' instruction will only cause an exception if * the next window needs to be loaded. In fact this ALLOCA exception may be * replaced at some point by changing the hardware to do an underflow exception * of the proper size instead. * * This algorithm simply backs out the register changes started by the user * exception handler, makes it appear that we have started a window underflow * by rotating the window back and then setting the old window base (OWB) in * the 'ps' register with the rolled back window base. The 'movsp' instruction * will be re-executed and this time since the next window frame is in the * active AR registers it won't cause an exception. * * If the WindowUnderflow code gets a TLB miss, the page will get mapped and * the partial WindowUnderflow will be handled in the double exception * handler. * * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ ENTRY(fast_alloca) rsr a0, windowbase rotw -1 rsr a2, ps extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH xor a3, a3, a4 l32i a4, a6, PT_AREG0 l32i a1, a6, PT_DEPC rsr a6, depc wsr a1, depc slli a3, a3, PS_OWB_SHIFT xor a2, a2, a3 wsr a2, ps rsync _bbci.l a4, 31, 4f rotw -1 _bbci.l a8, 30, 8f rotw -1 j _WindowUnderflow12 8: j _WindowUnderflow8 4: j _WindowUnderflow4 ENDPROC(fast_alloca) #endif #ifdef CONFIG_USER_ABI_CALL0_PROBE /* * fast illegal instruction handler. * * This is used to fix up user PS.WOE on the exception caused * by the first opcode related to the register window. If PS.WOE is * already set it goes directly to the common user exception handler. * * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table */ ENTRY(fast_illegal_instruction_user) rsr a0, ps bbsi.l a0, PS_WOE_BIT, 1f s32i a3, a2, PT_AREG3 movi a3, PS_WOE_MASK or a0, a0, a3 wsr a0, ps #ifdef CONFIG_USER_ABI_CALL0_PROBE GET_THREAD_INFO(a3, a2) rsr a0, epc1 s32i a0, a3, TI_PS_WOE_FIX_ADDR #endif l32i a3, a2, PT_AREG3 l32i a0, a2, PT_AREG0 rsr a2, depc rfe 1: call0 user_exception ENDPROC(fast_illegal_instruction_user) #endif /* * fast system calls. 
* * WARNING: The kernel doesn't save the entire user context before * handling a fast system call. These functions are small and short, * usually offering some functionality not available to user tasks. * * BE CAREFUL TO PRESERVE THE USER'S CONTEXT. * * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table */ ENTRY(fast_syscall_user) /* Skip syscall. */ rsr a0, epc1 addi a0, a0, 3 wsr a0, epc1 l32i a0, a2, PT_DEPC bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable rsr a0, depc # get syscall-nr _beqz a0, fast_syscall_spill_registers _beqi a0, __NR_xtensa, fast_syscall_xtensa call0 user_exception ENDPROC(fast_syscall_user) ENTRY(fast_syscall_unrecoverable) /* Restore all states. */ l32i a0, a2, PT_AREG0 # restore a0 xsr a2, depc # restore a2, depc wsr a0, excsave1 call0 unrecoverable_exception ENDPROC(fast_syscall_unrecoverable) /* * sysxtensa syscall handler * * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused); * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused); * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused); * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval); * a2 a6 a3 a4 a5 * * Entry condition: * * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in a0 and DEPC * a3: a3 * a4..a15: unchanged * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * * Note: we don't have to save a2; a2 holds the return value */ .literal_position #ifdef CONFIG_FAST_SYSCALL_XTENSA ENTRY(fast_syscall_xtensa) s32i a7, a2, PT_AREG7 # we need an additional register movi a7, 4 # sizeof(unsigned int) access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp _bgeui a6, SYS_XTENSA_COUNT, .Lill _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp /* Fall through for ATOMIC_CMP_SWP. */ .Lswp: /* Atomic compare and swap */ EX(.Leac) l32i a0, a3, 0 # read old value bne a0, a4, 1f # same as old value? jump EX(.Leac) s32i a5, a3, 0 # different, modify value l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 1 # and return 1 rfe 1: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 0 # return 0 (note that we cannot set rfe .Lnswp: /* Atomic set, add, and exg_add. */ EX(.Leac) l32i a7, a3, 0 # orig addi a6, a6, -SYS_XTENSA_ATOMIC_SET add a0, a4, a7 # + arg moveqz a0, a4, a6 # set addi a6, a6, SYS_XTENSA_ATOMIC_SET EX(.Leac) s32i a0, a3, 0 # write new value mov a0, a2 mov a2, a7 l32i a7, a0, PT_AREG7 # restore a7 l32i a0, a0, PT_AREG0 # restore a0 rfe .Leac: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, -EFAULT rfe .Lill: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, -EINVAL rfe ENDPROC(fast_syscall_xtensa) #else /* CONFIG_FAST_SYSCALL_XTENSA */ ENTRY(fast_syscall_xtensa) l32i a0, a2, PT_AREG0 # restore a0 movi a2, -ENOSYS rfe ENDPROC(fast_syscall_xtensa) #endif /* CONFIG_FAST_SYSCALL_XTENSA */ /* fast_syscall_spill_registers. 
* * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. */ #if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) && \ defined(USER_SUPPORT_WINDOWED) ENTRY(fast_syscall_spill_registers) /* Register a FIXUP handler (pass current wb as a parameter) */ xsr a3, excsave1 movi a0, fast_syscall_spill_registers_fixup s32i a0, a3, EXC_TABLE_FIXUP rsr a0, windowbase s32i a0, a3, EXC_TABLE_PARAM xsr a3, excsave1 # restore a3 and excsave_1 /* Save a3, a4 and SAR on stack. */ rsr a0, sar s32i a3, a2, PT_AREG3 s32i a0, a2, PT_SAR /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */ s32i a4, a2, PT_AREG4 s32i a7, a2, PT_AREG7 s32i a8, a2, PT_AREG8 s32i a11, a2, PT_AREG11 s32i a12, a2, PT_AREG12 s32i a15, a2, PT_AREG15 /* * Rotate ws so that the current windowbase is at bit 0. * Assume ws = xxxwww1yy (www1 current window frame). * Rotate ws right so that a4 = yyxxxwww1. */ rsr a0, windowbase rsr a3, windowstart # a3 = xxxwww1yy ssr a0 # holds WB slli a0, a3, WSBITS or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 /* We are done if there are no frames other than the current register frame. */ extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww movi a0, (1 << (WSBITS-1)) _beqz a3, .Lnospill # only one active frame? jump /* We want 1 at the top, so that we return to the current windowbase */ or a3, a3, a0 # 1yyxxxwww /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ wsr a3, windowstart # save shifted windowstart neg a0, a3 and a3, a0, a3 # first bit set from right: 000010000 ffs_ws a0, a3 # a0: shifts to skip empty frames movi a3, WSBITS sub a0, a3, a0 # WSBITS-a0: number of 0-bits from right ssr a0 # save in SAR for later. rsr a3, windowbase add a3, a3, a0 wsr a3, windowbase rsync rsr a3, windowstart srl a3, a3 # shift windowstart /* WB is now just one frame below the oldest frame in the register window. WS is shifted so the oldest frame is in bit 0, thus, WB and WS differ by one 4-register frame. */ /* Save frames. Depending on what call was used (call4, call8, or call12), * we have to save 4, 8, or 12 registers. */ .Lloop: _bbsi.l a3, 1, .Lc4 _bbci.l a3, 2, .Lc12 .Lc8: s32e a4, a13, -16 l32e a4, a5, -12 s32e a8, a4, -32 s32e a5, a13, -12 s32e a6, a13, -8 s32e a7, a13, -4 s32e a9, a4, -28 s32e a10, a4, -24 s32e a11, a4, -20 srli a11, a3, 2 # shift windowbase by 2 rotw 2 _bnei a3, 1, .Lloop j .Lexit .Lc4: s32e a4, a9, -16 s32e a5, a9, -12 s32e a6, a9, -8 s32e a7, a9, -4 srli a7, a3, 1 rotw 1 _bnei a3, 1, .Lloop j .Lexit .Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 3 shouldn't be zero! /* 12-register frame (call12) */ l32e a0, a5, -12 s32e a8, a0, -48 mov a8, a0 s32e a9, a8, -44 s32e a10, a8, -40 s32e a11, a8, -36 s32e a12, a8, -32 s32e a13, a8, -28 s32e a14, a8, -24 s32e a15, a8, -20 srli a15, a3, 3 /* The stack pointer for a4..a7 is out of reach, so we rotate the * window, grab the stack pointer, and rotate back. * Alternatively, we could also use the following approach, but that * makes the fixup routine much more complicated: * rotw 1 * s32e a0, a13, -16 * ... * rotw 2 */ rotw 1 mov a4, a13 rotw -1 s32e a4, a8, -16 s32e a5, a8, -12 s32e a6, a8, -8 s32e a7, a8, -4 rotw 3 _beqi a3, 1, .Lexit j .Lloop .Lexit: /* Done. 
Do the final rotation and set WS */ rotw 1 rsr a3, windowbase ssl a3 movi a3, 1 sll a3, a3 wsr a3, windowstart .Lnospill: /* Advance PC, restore registers and SAR, and return from exception. */ l32i a3, a2, PT_SAR l32i a0, a2, PT_AREG0 wsr a3, sar l32i a3, a2, PT_AREG3 /* Restore clobbered registers. */ l32i a4, a2, PT_AREG4 l32i a7, a2, PT_AREG7 l32i a8, a2, PT_AREG8 l32i a11, a2, PT_AREG11 l32i a12, a2, PT_AREG12 l32i a15, a2, PT_AREG15 movi a2, 0 rfe .Linvalid_mask: /* We get here because of an unrecoverable error in the window * registers, so set up a dummy frame and kill the user application. * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. */ movi a0, 1 movi a1, 0 wsr a0, windowstart wsr a1, windowbase rsync movi a0, 0 rsr a3, excsave1 l32i a1, a3, EXC_TABLE_KSTK movi a4, KERNEL_PS_WOE_MASK | LOCKLEVEL wsr a4, ps rsync movi abi_arg0, SIGSEGV abi_call make_task_dead /* shouldn't return, so panic */ wsr a0, excsave1 call0 unrecoverable_exception # should not return 1: j 1b ENDPROC(fast_syscall_spill_registers) /* Fixup handler. * * We get here if the spill routine causes an exception, e.g. tlb miss. * We basically restore WINDOWBASE and WINDOWSTART to the condition when * we entered the spill routine and jump to the user exception handler. * * Note that we only need to restore the bits in windowstart that have not * been spilled yet by the _spill_registers routine. Luckily, a3 contains a * rotated windowstart with only those bits set for frames that haven't been * spilled yet. Because a3 is rotated such that bit 0 represents the register * frame for the current windowbase - 1, we need to rotate a3 left by the * value of the current windowbase + 1 and move it to windowstart. * * a0: value of depc, original value in depc * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE * a3: exctable, original value in excsave1 */ ENTRY(fast_syscall_spill_registers_fixup) rsr a2, windowbase # get current windowbase (a2 is saved) xsr a0, depc # restore depc and a0 ssl a2 # set shift (32 - WB) /* We need to make sure the current registers (a0-a3) are preserved. * To do this, we simply set the bit for the current window frame * in WS, so that the exception handlers save them to the task stack. * * Note: we use a3 to set the windowbase, so we take special care * of it, saving it in the original _spill_registers frame across * the exception handler call. */ xsr a3, excsave1 # get spill-mask slli a3, a3, 1 # shift left by one addi a3, a3, 1 # set the bit for the current window frame slli a2, a3, 32-WSBITS src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... wsr a2, windowstart # set corrected windowstart srli a3, a3, 1 rsr a2, excsave1 l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 xsr a2, excsave1 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) xsr a2, excsave1 /* Return to the original (user task) WINDOWBASE. * We leave the following frame behind: * a0, a1, a2 same * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) * depc: depc (we have to return to that address) * excsave_1: exctable */ wsr a3, windowbase rsync /* We are now in the original frame from when we entered _spill_registers: * a0: return address * a1: used, stack pointer * a2: kernel stack pointer * a3: available * depc: exception address * excsave: exctable * Note: This frame might be the same as above. */ /* Set up the stack pointer. */ addi a2, a2, -PT_USER_SIZE s32i a0, a2, PT_AREG0 /* Make sure we return to this fixup handler. 
*/ movi a3, fast_syscall_spill_registers_fixup_return s32i a3, a2, PT_DEPC # setup depc /* Jump to the exception handler. */ rsr a3, excsave1 rsr a0, exccause addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_USER # load handler l32i a3, a3, EXC_TABLE_DOUBLE_SAVE jx a0 ENDPROC(fast_syscall_spill_registers_fixup) ENTRY(fast_syscall_spill_registers_fixup_return) /* When we return here, all registers have been restored (a2: DEPC) */ wsr a2, depc # exception address /* Restore fixup handler. */ rsr a2, excsave1 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE movi a3, fast_syscall_spill_registers_fixup s32i a3, a2, EXC_TABLE_FIXUP rsr a3, windowbase s32i a3, a2, EXC_TABLE_PARAM l32i a2, a2, EXC_TABLE_KSTK /* Load WB at the time the exception occurred. */ rsr a3, sar # WB is still in SAR neg a3, a3 wsr a3, windowbase rsync rsr a3, excsave1 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE rfde ENDPROC(fast_syscall_spill_registers_fixup_return) #else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ ENTRY(fast_syscall_spill_registers) l32i a0, a2, PT_AREG0 # restore a0 movi a2, -ENOSYS rfe ENDPROC(fast_syscall_spill_registers) #endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ #ifdef CONFIG_MMU /* * We should never get here. Bail out! */ ENTRY(fast_second_level_miss_double_kernel) 1: call0 unrecoverable_exception # should not return 1: j 1b ENDPROC(fast_second_level_miss_double_kernel) /* First-level entry handler for user, kernel, and double 2nd-level * TLB miss exceptions. Note that for now, user and kernel miss * exceptions share the same entry point and are handled identically. * * An old, less-efficient C version of this function used to exist. * We include it below, interleaved as comments, for reference. * * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ ENTRY(fast_second_level_miss) /* Save a1 and a3. Note: we don't expect a double exception. */ s32i a1, a2, PT_AREG1 s32i a3, a2, PT_AREG3 /* We need to map the page of PTEs for the user task. Find * the pointer to that page. Also, it's possible for tsk->mm * to be NULL while tsk->active_mm is nonzero if we faulted on * a vmalloc address. In that rare case, we must use * active_mm instead to avoid a fault in this handler. See * * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html * (or search Internet on "mm vs. active_mm") * * if (!mm) * mm = tsk->active_mm; * pgd = pgd_offset (mm, regs->excvaddr); * pmd = pmd_offset (pgd, regs->excvaddr); * pmdval = *pmd; */ GET_CURRENT(a1,a2) l32i a0, a1, TASK_MM # tsk->mm beqz a0, .Lfast_second_level_miss_no_mm .Lfast_second_level_miss_continue: rsr a3, excvaddr # fault address _PGD_OFFSET(a0, a3, a1) l32i a0, a0, 0 # read pmdval beqz a0, .Lfast_second_level_miss_no_pmd /* Read ptevaddr and convert to top of page-table page. * * vpnval = read_ptevaddr_register() & PAGE_MASK; * vpnval += DTLB_WAY_PGTABLE; * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL); * write_dtlb_entry (pteval, vpnval); * * The messy computation for 'pteval' above really simplifies * into the following: * * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK) * | PAGE_DIRECTORY */ movi a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff add a0, a0, a1 # pmdval - PAGE_OFFSET extui a1, a0, 0, PAGE_SHIFT # ... 
& PAGE_MASK xor a0, a0, a1 movi a1, _PAGE_DIRECTORY or a0, a0, a1 # ... | PAGE_DIRECTORY /* * We utilize all three wired-ways (7-9) to hold pmd translations. * Memory regions are mapped to the DTLBs according to bits 28 and 29. * This allows us to map the three most common regions to three different * DTLBs: * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000) * 2 -> way 8 shared libraries (2000.0000) * 3 -> way 9 stack (3000.0000) */ extui a3, a3, 28, 2 # addr. bits 28 and 29 -> 0,1,2,3 rsr a1, ptevaddr addx2 a3, a3, a3 # -> 0,3,6,9 srli a1, a1, PAGE_SHIFT extui a3, a3, 2, 2 # -> 0,0,1,2 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK addi a3, a3, DTLB_WAY_PGD add a1, a1, a3 # ... + way_number .Lfast_second_level_miss_wdtlb: wdtlb a0, a1 dsync /* Exit critical section. */ .Lfast_second_level_miss_skip_wdtlb: rsr a3, excsave1 movi a0, 0 s32i a0, a3, EXC_TABLE_FIXUP /* Restore the working registers, and return. */ l32i a0, a2, PT_AREG0 l32i a1, a2, PT_AREG1 l32i a3, a2, PT_AREG3 l32i a2, a2, PT_DEPC bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f /* Restore excsave1 and return. */ rsr a2, depc rfe /* Return from double exception. */ 1: xsr a2, depc esync rfde .Lfast_second_level_miss_no_mm: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 bnez a0, .Lfast_second_level_miss_continue /* Even more unlikely case active_mm == 0. * We can get here with an NMI in the middle of a context_switch that * touches the vmalloc area. */ movi a0, init_mm j .Lfast_second_level_miss_continue .Lfast_second_level_miss_no_pmd: #if (DCACHE_WAY_SIZE > PAGE_SIZE) /* Special case for cache aliasing. * We (should) only get here if a clear_user_page, copy_user_page * or the aliased cache flush functions got preempted * by another task. Re-establish temporary mapping to the * TLBTEMP_BASE areas. */ /* We shouldn't be in a double exception */ l32i a0, a2, PT_DEPC bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lfast_second_level_miss_slow /* Make sure the exception originated in the special functions */ movi a0, __tlbtemp_mapping_start rsr a3, epc1 bltu a3, a0, .Lfast_second_level_miss_slow movi a0, __tlbtemp_mapping_end bgeu a3, a0, .Lfast_second_level_miss_slow /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */ movi a3, TLBTEMP_BASE_1 rsr a0, excvaddr bltu a0, a3, .Lfast_second_level_miss_slow addi a1, a0, -TLBTEMP_SIZE bgeu a1, a3, .Lfast_second_level_miss_slow /* Check if we have to restore an ITLB mapping. */ movi a1, __tlbtemp_mapping_itlb rsr a3, epc1 sub a3, a3, a1 /* Calculate VPN */ movi a1, PAGE_MASK and a1, a1, a0 /* Jump for ITLB entry */ bgez a3, 1f /* We can use up to two TLBTEMP areas, one for src and one for dst. */ extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1 add a1, a3, a1 /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */ mov a0, a6 movnez a0, a7, a3 j .Lfast_second_level_miss_wdtlb /* ITLB entry. We only use dst in a6. */ 1: witlb a6, a1 isync j .Lfast_second_level_miss_skip_wdtlb #endif // DCACHE_WAY_SIZE > PAGE_SIZE /* Invalid PGD, default exception handling */ .Lfast_second_level_miss_slow: rsr a1, depc s32i a1, a2, PT_AREG2 mov a1, a2 rsr a2, ps bbsi.l a2, PS_UM_BIT, 1f call0 _kernel_exception 1: call0 _user_exception ENDPROC(fast_second_level_miss) /* * StoreProhibitedException * * Update the pte and invalidate the itlb mapping for this pte. 
* * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ ENTRY(fast_store_prohibited) /* Save a1 and a3. */ s32i a1, a2, PT_AREG1 s32i a3, a2, PT_AREG3 GET_CURRENT(a1,a2) l32i a0, a1, TASK_MM # tsk->mm beqz a0, .Lfast_store_no_mm .Lfast_store_continue: rsr a1, excvaddr # fault address _PGD_OFFSET(a0, a1, a3) l32i a0, a0, 0 beqz a0, .Lfast_store_slow /* * Note that we test _PAGE_WRITABLE_BIT only if PTE is present * and is not PAGE_NONE. See pgtable.h for possible PTE layouts. */ _PTE_OFFSET(a0, a1, a3) l32i a3, a0, 0 # read pteval movi a1, _PAGE_CA_INVALID ball a3, a1, .Lfast_store_slow bbci.l a3, _PAGE_WRITABLE_BIT, .Lfast_store_slow movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE or a3, a3, a1 rsr a1, excvaddr s32i a3, a0, 0 /* We need to flush the cache if we have page coloring. */ #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK dhwb a0, 0 #endif pdtlb a0, a1 wdtlb a3, a0 /* Exit critical section. */ movi a0, 0 rsr a3, excsave1 s32i a0, a3, EXC_TABLE_FIXUP /* Restore the working registers, and return. */ l32i a3, a2, PT_AREG3 l32i a1, a2, PT_AREG1 l32i a0, a2, PT_AREG0 l32i a2, a2, PT_DEPC bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f rsr a2, depc rfe /* Double exception. Restore FIXUP handler and return. */ 1: xsr a2, depc esync rfde .Lfast_store_no_mm: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 j .Lfast_store_continue /* If there was a problem, handle fault in C */ .Lfast_store_slow: rsr a1, excvaddr pdtlb a0, a1 bbci.l a0, DTLB_HIT_BIT, 1f idtlb a0 1: rsr a3, depc # still holds a2 s32i a3, a2, PT_AREG2 mov a1, a2 rsr a2, ps bbsi.l a2, PS_UM_BIT, 1f call0 _kernel_exception 1: call0 _user_exception ENDPROC(fast_store_prohibited) #endif /* CONFIG_MMU */ .text /* * System Calls. * * void system_call (struct pt_regs* regs, int exccause) * a2 a3 */ .literal_position ENTRY(system_call) #if defined(__XTENSA_WINDOWED_ABI__) abi_entry_default #elif defined(__XTENSA_CALL0_ABI__) abi_entry(12) s32i a0, sp, 0 s32i abi_saved0, sp, 4 s32i abi_saved1, sp, 8 mov abi_saved0, a2 #else #error Unsupported Xtensa ABI #endif /* regs->syscall = regs->areg[2] */ l32i a7, abi_saved0, PT_AREG2 s32i a7, abi_saved0, PT_SYSCALL GET_THREAD_INFO(a4, a1) l32i abi_saved1, a4, TI_FLAGS movi a4, _TIF_WORK_MASK and abi_saved1, abi_saved1, a4 beqz abi_saved1, 1f mov abi_arg0, abi_saved0 abi_call do_syscall_trace_enter beqz abi_rv, .Lsyscall_exit l32i a7, abi_saved0, PT_SYSCALL 1: /* syscall = sys_call_table[syscall_nr] */ movi a4, sys_call_table movi a5, __NR_syscalls movi abi_rv, -ENOSYS bgeu a7, a5, 1f addx4 a4, a7, a4 l32i abi_tmp0, a4, 0 /* Load args: arg0 - arg5 are passed via regs. 
*/ l32i abi_arg0, abi_saved0, PT_AREG6 l32i abi_arg1, abi_saved0, PT_AREG3 l32i abi_arg2, abi_saved0, PT_AREG4 l32i abi_arg3, abi_saved0, PT_AREG5 l32i abi_arg4, abi_saved0, PT_AREG8 l32i abi_arg5, abi_saved0, PT_AREG9 abi_callx abi_tmp0 1: /* regs->areg[2] = return_value */ s32i abi_rv, abi_saved0, PT_AREG2 bnez abi_saved1, 1f .Lsyscall_exit: #if defined(__XTENSA_WINDOWED_ABI__) abi_ret_default #elif defined(__XTENSA_CALL0_ABI__) l32i a0, sp, 0 l32i abi_saved0, sp, 4 l32i abi_saved1, sp, 8 abi_ret(12) #else #error Unsupported Xtensa ABI #endif 1: mov abi_arg0, abi_saved0 abi_call do_syscall_trace_leave j .Lsyscall_exit ENDPROC(system_call) /* * Macro to spill live registers on the kernel stack. * * Entry condition: ps.woe is set, ps.excm is cleared * Exit condition: windowstart has single bit set * May clobber: a12, a13 */ .macro spill_registers_kernel #if XCHAL_NUM_AREGS > 16 call12 1f _j 2f retw .align 4 1: _entry a1, 48 addi a12, a0, 3 #if XCHAL_NUM_AREGS > 32 .rept (XCHAL_NUM_AREGS - 32) / 12 _entry a1, 48 mov a12, a0 .endr #endif _entry a1, 16 #if XCHAL_NUM_AREGS % 12 == 0 mov a8, a8 #elif XCHAL_NUM_AREGS % 12 == 4 mov a12, a12 #elif XCHAL_NUM_AREGS % 12 == 8 mov a4, a4 #endif retw 2: #else mov a12, a12 #endif .endm /* * Task switch. * * struct task* _switch_to (struct task* prev, struct task* next) * a2 a2 a3 */ ENTRY(_switch_to) #if defined(__XTENSA_WINDOWED_ABI__) abi_entry(XTENSA_SPILL_STACK_RESERVE) #elif defined(__XTENSA_CALL0_ABI__) abi_entry(16) s32i a12, sp, 0 s32i a13, sp, 4 s32i a14, sp, 8 s32i a15, sp, 12 #else #error Unsupported Xtensa ABI #endif mov a11, a3 # preserve 'next' (a3) l32i a4, a2, TASK_THREAD_INFO l32i a5, a3, TASK_THREAD_INFO save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER #if THREAD_RA > 1020 || THREAD_SP > 1020 addi a10, a2, TASK_THREAD s32i a0, a10, THREAD_RA - TASK_THREAD # save return address s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer #else s32i a0, a2, THREAD_RA # save return address s32i a1, a2, THREAD_SP # save stack pointer #endif #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) movi a6, __stack_chk_guard l32i a8, a3, TASK_STACK_CANARY s32i a8, a6, 0 #endif /* Disable interrupts while we manipulate the stack pointer. */ irq_save a14, a3 rsync /* Switch CPENABLE */ #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) l32i a3, a5, THREAD_CPENABLE #ifdef CONFIG_SMP beqz a3, 1f memw # pairs with memw (2) in fast_coprocessor l32i a6, a5, THREAD_CP_OWNER_CPU l32i a7, a5, THREAD_CPU beq a6, a7, 1f # load 0 into CPENABLE if current CPU is not the owner movi a3, 0 1: #endif wsr a3, cpenable #endif #if XCHAL_HAVE_EXCLUSIVE l32i a3, a5, THREAD_ATOMCTL8 getex a3 s32i a3, a4, THREAD_ATOMCTL8 #endif /* Flush register file. */ #if defined(__XTENSA_WINDOWED_ABI__) spill_registers_kernel #endif /* Set kernel stack (and leave critical section) * Note: It's safe to set it here. The stack will not be overwritten * because the kernel stack will only be loaded again after * we return from kernel space. 
*/ rsr a3, excsave1 # exc_table addi a7, a5, PT_REGS_OFFSET s32i a7, a3, EXC_TABLE_KSTK /* restore context of the task 'next' */ l32i a0, a11, THREAD_RA # restore return address l32i a1, a11, THREAD_SP # restore stack pointer load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER wsr a14, ps rsync #if defined(__XTENSA_WINDOWED_ABI__) abi_ret(XTENSA_SPILL_STACK_RESERVE) #elif defined(__XTENSA_CALL0_ABI__) l32i a12, sp, 0 l32i a13, sp, 4 l32i a14, sp, 8 l32i a15, sp, 12 abi_ret(16) #else #error Unsupported Xtensa ABI #endif ENDPROC(_switch_to) ENTRY(ret_from_fork) /* void schedule_tail (struct task_struct *prev) * Note: prev is still in abi_arg0 (return value from fake call frame) */ abi_call schedule_tail mov abi_arg0, a1 abi_call do_syscall_trace_leave j common_exception_return ENDPROC(ret_from_fork) /* * Kernel thread creation helper * On entry, set up by copy_thread: abi_saved0 = thread_fn, * abi_saved1 = thread_fn arg. Left from _switch_to: abi_arg0 = prev */ ENTRY(ret_from_kernel_thread) abi_call schedule_tail mov abi_arg0, abi_saved1 abi_callx abi_saved0 j common_exception_return ENDPROC(ret_from_kernel_thread) #ifdef CONFIG_HIBERNATION .section .bss, "aw" .align 4 .Lsaved_regs: #if defined(__XTENSA_WINDOWED_ABI__) .fill 2, 4 #elif defined(__XTENSA_CALL0_ABI__) .fill 6, 4 #else #error Unsupported Xtensa ABI #endif .align XCHAL_NCP_SA_ALIGN .Lsaved_user_regs: .fill XTREGS_USER_SIZE, 1 .previous ENTRY(swsusp_arch_suspend) abi_entry_default movi a2, .Lsaved_regs movi a3, .Lsaved_user_regs s32i a0, a2, 0 s32i a1, a2, 4 save_xtregs_user a3 a4 a5 a6 a7 a8 0 #if defined(__XTENSA_WINDOWED_ABI__) spill_registers_kernel #elif defined(__XTENSA_CALL0_ABI__) s32i a12, a2, 8 s32i a13, a2, 12 s32i a14, a2, 16 s32i a15, a2, 20 #else #error Unsupported Xtensa ABI #endif abi_call swsusp_save mov a2, abi_rv abi_ret_default ENDPROC(swsusp_arch_suspend) ENTRY(swsusp_arch_resume) abi_entry_default #if defined(__XTENSA_WINDOWED_ABI__) spill_registers_kernel #endif movi a2, restore_pblist l32i a2, a2, 0 .Lcopy_pbe: l32i a3, a2, PBE_ADDRESS l32i a4, a2, PBE_ORIG_ADDRESS __loopi a3, a9, PAGE_SIZE, 16 l32i a5, a3, 0 l32i a6, a3, 4 l32i a7, a3, 8 l32i a8, a3, 12 addi a3, a3, 16 s32i a5, a4, 0 s32i a6, a4, 4 s32i a7, a4, 8 s32i a8, a4, 12 addi a4, a4, 16 __endl a3, a9 l32i a2, a2, PBE_NEXT bnez a2, .Lcopy_pbe movi a2, .Lsaved_regs movi a3, .Lsaved_user_regs l32i a0, a2, 0 l32i a1, a2, 4 load_xtregs_user a3 a4 a5 a6 a7 a8 0 #if defined(__XTENSA_CALL0_ABI__) l32i a12, a2, 8 l32i a13, a2, 12 l32i a14, a2, 16 l32i a15, a2, 20 #endif movi a2, 0 abi_ret_default ENDPROC(swsusp_arch_resume) #endif
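For reference, the WINDOWSTART trick that kernel_exception_exit relies on above ('and' WINDOWSTART with WINDOWSTART-1 to detect a single set bit) is easier to follow in C. A minimal illustrative sketch, not part of this file's build; the function name is invented for the example:

#include <stdbool.h>
#include <stdint.h>

/* True iff exactly one WINDOWSTART bit is set, i.e. only the current
 * window frame is live and the previous frame was spilled, so the
 * deferred MOVSP still has to be performed.  Clearing the lowest set
 * bit with ws & (ws - 1) leaves zero only for a single-bit value; ws
 * is assumed nonzero (the current frame's bit is always set). */
static bool previous_frame_spilled(uint32_t ws)
{
	return (ws & (ws - 1)) == 0;
}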
aixcc-public/challenge-001-exemplar-source
1,341
arch/xtensa/kernel/mxhead.S
/* * Xtensa Secondary Processors startup code. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2013 Tensilica Inc. * * Joe Taylor <joe@tensilica.com> * Chris Zankel <chris@zankel.net> * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> * Pete Delaney <piet@tensilica.com> */ #include <linux/linkage.h> #include <asm/cacheasm.h> #include <asm/initialize_mmu.h> #include <asm/mxregs.h> #include <asm/regs.h> .section .SecondaryResetVector.text, "ax" ENTRY(_SecondaryResetVector) _j _SetupOCD .begin no-absolute-literals .literal_position _SetupOCD: /* * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow * xt-gdb to single step via DEBUG exceptions received directly * by ocd. */ #if XCHAL_HAVE_WINDOWED movi a1, 1 movi a0, 0 wsr a1, windowstart wsr a0, windowbase rsync #endif movi a1, LOCKLEVEL wsr a1, ps rsync _SetupMMU: #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX initialize_mmu #endif /* * Start Secondary Processors with NULL pointer to boot params. */ movi a2, 0 # a2 == NULL movi a3, _startup jx a3 .end no-absolute-literals
aixcc-public/challenge-001-exemplar-source
6,815
arch/xtensa/kernel/coprocessor.S
/* * arch/xtensa/kernel/coprocessor.S * * Xtensa processor configuration-specific table of coprocessor and * other custom register layout information. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003 - 2007 Tensilica Inc. */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/coprocessor.h> #include <asm/current.h> #include <asm/regs.h> /* * Rules for coprocessor state manipulation on SMP: * * - a task may have live coprocessors only on one CPU. * * - whether coprocessor context of task T is live on some CPU is * denoted by T's thread_info->cpenable. * * - non-zero thread_info->cpenable means that thread_info->cp_owner_cpu * is valid in the T's thread_info. Zero thread_info->cpenable means that * coprocessor context is valid in the T's thread_info. * * - if a coprocessor context of task T is live on CPU X, only CPU X changes * T's thread_info->cpenable, cp_owner_cpu and coprocessor save area. * This is done by making sure that for the task T with live coprocessor * on CPU X cpenable SR is 0 when T runs on any other CPU Y. * When fast_coprocessor exception is taken on CPU Y it goes to the * C-level do_coprocessor that uses IPI to make CPU X flush T's coprocessors. */ #if XTENSA_HAVE_COPROCESSORS /* * Macros for lazy context switch. */ #define SAVE_CP_REGS(x) \ .if XTENSA_HAVE_COPROCESSOR(x); \ .align 4; \ .Lsave_cp_regs_cp##x: \ xchal_cp##x##_store a2 a3 a4 a5 a6; \ ret; \ .endif #define LOAD_CP_REGS(x) \ .if XTENSA_HAVE_COPROCESSOR(x); \ .align 4; \ .Lload_cp_regs_cp##x: \ xchal_cp##x##_load a2 a3 a4 a5 a6; \ ret; \ .endif #define CP_REGS_TAB(x) \ .if XTENSA_HAVE_COPROCESSOR(x); \ .long .Lsave_cp_regs_cp##x; \ .long .Lload_cp_regs_cp##x; \ .else; \ .long 0, 0; \ .endif; \ .long THREAD_XTREGS_CP##x #define CP_REGS_TAB_SAVE 0 #define CP_REGS_TAB_LOAD 4 #define CP_REGS_TAB_OFFSET 8 __XTENSA_HANDLER SAVE_CP_REGS(0) SAVE_CP_REGS(1) SAVE_CP_REGS(2) SAVE_CP_REGS(3) SAVE_CP_REGS(4) SAVE_CP_REGS(5) SAVE_CP_REGS(6) SAVE_CP_REGS(7) LOAD_CP_REGS(0) LOAD_CP_REGS(1) LOAD_CP_REGS(2) LOAD_CP_REGS(3) LOAD_CP_REGS(4) LOAD_CP_REGS(5) LOAD_CP_REGS(6) LOAD_CP_REGS(7) .align 4 .Lcp_regs_jump_table: CP_REGS_TAB(0) CP_REGS_TAB(1) CP_REGS_TAB(2) CP_REGS_TAB(3) CP_REGS_TAB(4) CP_REGS_TAB(5) CP_REGS_TAB(6) CP_REGS_TAB(7) /* * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ ENTRY(fast_coprocessor) s32i a3, a2, PT_AREG3 #ifdef CONFIG_SMP /* * Check if any coprocessor context is live on another CPU * and if so go through the C-level coprocessor exception handler * to flush it to memory. */ GET_THREAD_INFO (a0, a2) l32i a3, a0, THREAD_CPENABLE beqz a3, .Lload_local /* * Pairs with smp_wmb in local_coprocessor_release_all * and with both memws below. 
*/ memw l32i a3, a0, THREAD_CPU l32i a0, a0, THREAD_CP_OWNER_CPU beq a0, a3, .Lload_local rsr a0, ps l32i a3, a2, PT_AREG3 bbci.l a0, PS_UM_BIT, 1f call0 user_exception 1: call0 kernel_exception #endif /* Save remaining registers a1-a3 and SAR */ .Lload_local: rsr a3, sar s32i a1, a2, PT_AREG1 s32i a3, a2, PT_SAR mov a1, a2 rsr a2, depc s32i a2, a1, PT_AREG2 /* The hal macros require up to 4 temporary registers. We use a3..a6. */ s32i a4, a1, PT_AREG4 s32i a5, a1, PT_AREG5 s32i a6, a1, PT_AREG6 s32i a7, a1, PT_AREG7 s32i a8, a1, PT_AREG8 s32i a9, a1, PT_AREG9 s32i a10, a1, PT_AREG10 /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */ rsr a3, exccause addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED /* Set corresponding CPENABLE bit -> (sar: cp-index, a2: 1<<cp-index) */ ssl a3 # SAR: 32 - coprocessor_number movi a2, 1 rsr a0, cpenable sll a2, a2 or a0, a0, a2 wsr a0, cpenable rsync /* Get coprocessor save/load table entry (a7). */ movi a7, .Lcp_regs_jump_table addx8 a7, a3, a7 addx4 a7, a3, a7 /* Retrieve previous owner (a8). */ rsr a0, excsave1 # exc_table addx4 a0, a3, a0 # entry for CP l32i a8, a0, EXC_TABLE_COPROCESSOR_OWNER /* Set new owner (a9). */ GET_THREAD_INFO (a9, a1) l32i a4, a9, THREAD_CPU s32i a9, a0, EXC_TABLE_COPROCESSOR_OWNER s32i a4, a9, THREAD_CP_OWNER_CPU /* * Enable coprocessor for the new owner. (a2 = 1 << CP number) * This can be done before loading context into the coprocessor. */ l32i a4, a9, THREAD_CPENABLE or a4, a4, a2 /* * Make sure THREAD_CP_OWNER_CPU is in memory before updating * THREAD_CPENABLE */ memw # (2) s32i a4, a9, THREAD_CPENABLE beqz a8, 1f # skip 'save' if no previous owner /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */ l32i a10, a8, THREAD_CPENABLE xor a10, a10, a2 /* Get context save area and call save routine. */ l32i a2, a7, CP_REGS_TAB_OFFSET l32i a3, a7, CP_REGS_TAB_SAVE add a2, a2, a8 callx0 a3 /* * Make sure coprocessor context and THREAD_CP_OWNER_CPU are in memory * before updating THREAD_CPENABLE */ memw # (3) s32i a10, a8, THREAD_CPENABLE 1: /* Get context save area and call load routine. */ l32i a2, a7, CP_REGS_TAB_OFFSET l32i a3, a7, CP_REGS_TAB_LOAD add a2, a2, a9 callx0 a3 /* Restore all registers and return from exception handler. */ l32i a10, a1, PT_AREG10 l32i a9, a1, PT_AREG9 l32i a8, a1, PT_AREG8 l32i a7, a1, PT_AREG7 l32i a6, a1, PT_AREG6 l32i a5, a1, PT_AREG5 l32i a4, a1, PT_AREG4 l32i a0, a1, PT_SAR l32i a3, a1, PT_AREG3 l32i a2, a1, PT_AREG2 wsr a0, sar l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 rfe ENDPROC(fast_coprocessor) .text /* * coprocessor_flush(struct thread_info*, index) * a2 a3 * * Save coprocessor registers for coprocessor 'index'. * The register values are saved to or loaded from the coprocessor area * inside the thread_info structure. * * Note that this function doesn't update the coprocessor_owner information! * */ ENTRY(coprocessor_flush) abi_entry_default movi a4, .Lcp_regs_jump_table addx8 a4, a3, a4 addx4 a3, a3, a4 l32i a4, a3, CP_REGS_TAB_SAVE beqz a4, 1f l32i a3, a3, CP_REGS_TAB_OFFSET add a2, a2, a3 mov a7, a0 callx0 a4 mov a0, a7 1: abi_ret_default ENDPROC(coprocessor_flush) #endif /* XTENSA_HAVE_COPROCESSORS */
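The ownership rules described at the top of this file boil down to a small amount of bookkeeping. A rough C sketch of the single-CPU lazy-switch logic in fast_coprocessor; every type, name, and helper below is invented for illustration, and the real code additionally handles the cross-CPU IPI flush:

#include <stdint.h>
#include <string.h>

#define NCP 8				/* number of coprocessors (example) */

struct cp_area { uint8_t regs[64]; };	/* stand-in for the real save area */

struct thread_cp {
	uint32_t cpenable;		/* CPs whose context is live for us */
	struct cp_area save[NCP];	/* per-CP save areas */
};

static struct thread_cp *cp_owner[NCP];	/* current owner per coprocessor */
static struct cp_area cp_hw[NCP];	/* stand-in for the CP register files */

static void cp_save(int cp, struct cp_area *a) { memcpy(a, &cp_hw[cp], sizeof(*a)); }
static void cp_load(int cp, const struct cp_area *a) { memcpy(&cp_hw[cp], a, sizeof(*a)); }

/* Lazy switch on the "coprocessor disabled" exception for CP 'cp'. */
static void lazy_cp_switch(struct thread_cp *me, int cp)
{
	struct thread_cp *old = cp_owner[cp];

	if (old == me)
		return;				/* our context is already live */
	if (old) {				/* spill the previous owner */
		cp_save(cp, &old->save[cp]);
		old->cpenable &= ~(1u << cp);
	}
	cp_load(cp, &me->save[cp]);		/* load our saved context */
	me->cpenable |= 1u << cp;
	cp_owner[cp] = me;
}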
aixcc-public/challenge-001-exemplar-source
1,651
arch/xtensa/kernel/mcount.S
/* * arch/xtensa/kernel/mcount.S * * Xtensa specific mcount support * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2013 Tensilica Inc. */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/ftrace.h> /* * Entry condition: * * a2: a0 of the caller in windowed ABI * a10: a0 of the caller in call0 ABI * * In call0 ABI the function _mcount is called with the special ABI: * its argument is in a10 and all the usual argument registers (a2 - a7) * must be preserved in addition to callee-saved a12 - a15. */ ENTRY(_mcount) #if defined(__XTENSA_WINDOWED_ABI__) abi_entry_default movi a4, ftrace_trace_function l32i a4, a4, 0 movi a3, ftrace_stub bne a3, a4, 1f abi_ret_default 1: xor a7, a2, a1 movi a3, 0x3fffffff and a7, a7, a3 xor a7, a7, a1 xor a6, a0, a1 and a6, a6, a3 xor a6, a6, a1 addi a6, a6, -MCOUNT_INSN_SIZE callx4 a4 abi_ret_default #elif defined(__XTENSA_CALL0_ABI__) abi_entry_default movi a9, ftrace_trace_function l32i a9, a9, 0 movi a11, ftrace_stub bne a9, a11, 1f abi_ret_default 1: abi_entry(28) s32i a0, sp, 0 s32i a2, sp, 4 s32i a3, sp, 8 s32i a4, sp, 12 s32i a5, sp, 16 s32i a6, sp, 20 s32i a7, sp, 24 addi a2, a10, -MCOUNT_INSN_SIZE callx0 a9 l32i a0, sp, 0 l32i a2, sp, 4 l32i a3, sp, 8 l32i a4, sp, 12 l32i a5, sp, 16 l32i a6, sp, 20 l32i a7, sp, 24 abi_ret(28) #else #error Unsupported Xtensa ABI #endif ENDPROC(_mcount) ENTRY(ftrace_stub) abi_entry_default abi_ret_default ENDPROC(ftrace_stub)
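Stripped of the ABI-specific register shuffling, _mcount above is a check-then-call: return immediately when no tracer is installed, otherwise call the tracer with the instrumented function's address and its caller. An illustrative C rendering; the names and the call-size constant are assumptions for the example, and the 0x3fffffff masking that the windowed variant applies to convert window-call return addresses into plain PCs is omitted:

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

static void ftrace_stub_c(unsigned long ip, unsigned long parent_ip) { }

/* Patched by the ftrace core; ftrace_stub_c means "tracing off". */
static ftrace_func_t trace_fn = ftrace_stub_c;

#define MCOUNT_CALL_SIZE 3	/* assumed size of the call to _mcount */

static void mcount_c(unsigned long ret_addr, unsigned long parent_ret_addr)
{
	ftrace_func_t fn = trace_fn;

	if (fn == ftrace_stub_c)
		return;		/* fast path: tracing disabled */
	fn(ret_addr - MCOUNT_CALL_SIZE, parent_ret_addr);
}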
aixcc-public/challenge-001-exemplar-source
9,036
arch/xtensa/kernel/vmlinux.lds.S
/* * arch/xtensa/kernel/vmlinux.lds.S * * Xtensa linker script * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2008 Tensilica Inc. * * Chris Zankel <chris@zankel.net> * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> */ #define RO_EXCEPTION_TABLE_ALIGN 16 #include <asm-generic/vmlinux.lds.h> #include <asm/page.h> #include <asm/thread_info.h> #include <asm/core.h> #include <asm/vectors.h> OUTPUT_ARCH(xtensa) ENTRY(_start) #ifdef __XTENSA_EB__ jiffies = jiffies_64 + 4; #else jiffies = jiffies_64; #endif /* Note: In the following macros, it would be nice to specify only the vector name and section kind and construct "sym" and "section" using CPP concatenation, but that does not work reliably. Concatenating a string with "." produces an invalid token. CPP will not print a warning because it thinks this is an assembly file, but it leaves them as multiple tokens and there may or may not be whitespace between them. */ /* Macro for a relocation entry */ #define RELOCATE_ENTRY(sym, section) \ LONG(sym ## _start); \ LONG(sym ## _end); \ LONG(LOADADDR(section)) #if !defined(CONFIG_VECTORS_ADDR) && XCHAL_HAVE_VECBASE #define MERGED_VECTORS 1 #else #define MERGED_VECTORS 0 #endif /* * Macro to define a section for a vector. When MERGED_VECTORS is 0 * code for every vector is located with other init data. At startup * time head.S copies code for every vector to its final position according * to description recorded in the corresponding RELOCATE_ENTRY. */ #define SECTION_VECTOR4(sym, section, addr, prevsec) \ section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ { \ . = ALIGN(4); \ sym ## _start = ABSOLUTE(.); \ *(section) \ sym ## _end = ABSOLUTE(.); \ } #define SECTION_VECTOR2(section, addr) \ . = addr; \ *(section) /* * Mapping of input sections to output sections when linking. */ SECTIONS { . = KERNELOFFSET; /* .text section */ _text = .; _stext = .; .text : { /* The HEAD_TEXT section must be the first section! */ HEAD_TEXT #if MERGED_VECTORS . = ALIGN(PAGE_SIZE); _vecbase = .; #ifdef SUPPORT_WINDOWED SECTION_VECTOR2 (.WindowVectors.text, WINDOW_VECTORS_VADDR) #endif #if XCHAL_EXCM_LEVEL >= 2 SECTION_VECTOR2 (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR) #endif #if XCHAL_EXCM_LEVEL >= 3 SECTION_VECTOR2 (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR) #endif #if XCHAL_EXCM_LEVEL >= 4 SECTION_VECTOR2 (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR) #endif #if XCHAL_EXCM_LEVEL >= 5 SECTION_VECTOR2 (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR) #endif #if XCHAL_EXCM_LEVEL >= 6 SECTION_VECTOR2 (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR) #endif SECTION_VECTOR2 (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR) SECTION_VECTOR2 (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) SECTION_VECTOR2 (.UserExceptionVector.text, USER_VECTOR_VADDR) SECTION_VECTOR2 (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) *(.exception.text) #endif IRQENTRY_TEXT SOFTIRQENTRY_TEXT ENTRY_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT *(.fixup) } _etext = .; PROVIDE (etext = .); . = ALIGN(16); RO_DATA(4096) /* Data section */ #ifdef CONFIG_XIP_KERNEL INIT_TEXT_SECTION(PAGE_SIZE) #else _sdata = .; RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) _edata = .; /* Initialization code and data: */ . 
= ALIGN(PAGE_SIZE); __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) .init.data : { INIT_DATA } #endif .init.rodata : { . = ALIGN(0x4); __tagtable_begin = .; *(.taglist) __tagtable_end = .; . = ALIGN(16); __boot_reloc_table_start = ABSOLUTE(.); #if !MERGED_VECTORS #ifdef SUPPORT_WINDOWED RELOCATE_ENTRY(_WindowVectors_text, .WindowVectors.text); #endif #if XCHAL_EXCM_LEVEL >= 2 RELOCATE_ENTRY(_Level2InterruptVector_text, .Level2InterruptVector.text); #endif #if XCHAL_EXCM_LEVEL >= 3 RELOCATE_ENTRY(_Level3InterruptVector_text, .Level3InterruptVector.text); #endif #if XCHAL_EXCM_LEVEL >= 4 RELOCATE_ENTRY(_Level4InterruptVector_text, .Level4InterruptVector.text); #endif #if XCHAL_EXCM_LEVEL >= 5 RELOCATE_ENTRY(_Level5InterruptVector_text, .Level5InterruptVector.text); #endif #if XCHAL_EXCM_LEVEL >= 6 RELOCATE_ENTRY(_Level6InterruptVector_text, .Level6InterruptVector.text); #endif RELOCATE_ENTRY(_KernelExceptionVector_text, .KernelExceptionVector.text); RELOCATE_ENTRY(_UserExceptionVector_text, .UserExceptionVector.text); RELOCATE_ENTRY(_DoubleExceptionVector_text, .DoubleExceptionVector.text); RELOCATE_ENTRY(_DebugInterruptVector_text, .DebugInterruptVector.text); RELOCATE_ENTRY(_exception_text, .exception.text); #endif #ifdef CONFIG_XIP_KERNEL RELOCATE_ENTRY(_xip_data, .data); RELOCATE_ENTRY(_xip_init_data, .init.data); #endif #if defined(CONFIG_SECONDARY_RESET_VECTOR) RELOCATE_ENTRY(_SecondaryResetVector_text, .SecondaryResetVector.text); #endif __boot_reloc_table_end = ABSOLUTE(.) ; INIT_SETUP(XCHAL_ICACHE_LINESIZE) INIT_CALLS CON_INITCALL INIT_RAM_FS } PERCPU_SECTION(XCHAL_ICACHE_LINESIZE) /* We need this dummy segment here */ . = ALIGN(4); .dummy : { LONG(0) } #undef LAST #define LAST .dummy #if !MERGED_VECTORS /* The vectors are relocated to the real position at startup time */ #ifdef SUPPORT_WINDOWED SECTION_VECTOR4 (_WindowVectors_text, .WindowVectors.text, WINDOW_VECTORS_VADDR, LAST) #undef LAST #define LAST .WindowVectors.text #endif SECTION_VECTOR4 (_DebugInterruptVector_text, .DebugInterruptVector.text, DEBUG_VECTOR_VADDR, LAST) #undef LAST #define LAST .DebugInterruptVector.text #if XCHAL_EXCM_LEVEL >= 2 SECTION_VECTOR4 (_Level2InterruptVector_text, .Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR, LAST) # undef LAST # define LAST .Level2InterruptVector.text #endif #if XCHAL_EXCM_LEVEL >= 3 SECTION_VECTOR4 (_Level3InterruptVector_text, .Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR, LAST) # undef LAST # define LAST .Level3InterruptVector.text #endif #if XCHAL_EXCM_LEVEL >= 4 SECTION_VECTOR4 (_Level4InterruptVector_text, .Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR, LAST) # undef LAST # define LAST .Level4InterruptVector.text #endif #if XCHAL_EXCM_LEVEL >= 5 SECTION_VECTOR4 (_Level5InterruptVector_text, .Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR, LAST) # undef LAST # define LAST .Level5InterruptVector.text #endif #if XCHAL_EXCM_LEVEL >= 6 SECTION_VECTOR4 (_Level6InterruptVector_text, .Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR, LAST) # undef LAST # define LAST .Level6InterruptVector.text #endif SECTION_VECTOR4 (_KernelExceptionVector_text, .KernelExceptionVector.text, KERNEL_VECTOR_VADDR, LAST) #undef LAST SECTION_VECTOR4 (_UserExceptionVector_text, .UserExceptionVector.text, USER_VECTOR_VADDR, .KernelExceptionVector.text) SECTION_VECTOR4 (_DoubleExceptionVector_text, .DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR, .UserExceptionVector.text) #define LAST .DoubleExceptionVector.text #endif #if defined(CONFIG_SECONDARY_RESET_VECTOR) SECTION_VECTOR4 
(_SecondaryResetVector_text, .SecondaryResetVector.text, RESET_VECTOR1_VADDR, LAST) #undef LAST #define LAST .SecondaryResetVector.text #endif #if !MERGED_VECTORS SECTION_VECTOR4 (_exception_text, .exception.text, , LAST) #undef LAST #define LAST .exception.text #endif . = (LOADADDR(LAST) + SIZEOF(LAST) + 3) & ~ 3; .dummy1 : AT(ADDR(.dummy1)) { LONG(0) } . = ALIGN(PAGE_SIZE); #ifndef CONFIG_XIP_KERNEL __init_end = .; BSS_SECTION(0, 8192, 0) #endif _end = .; #ifdef CONFIG_XIP_KERNEL . = CONFIG_XIP_DATA_ADDR; _xip_start = .; #undef LOAD_OFFSET #define LOAD_OFFSET \ (CONFIG_XIP_DATA_ADDR - (LOADADDR(.dummy1) + SIZEOF(.dummy1) + 3) & ~ 3) _xip_data_start = .; _sdata = .; RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) _edata = .; _xip_data_end = .; /* Initialization data: */ STRUCT_ALIGN(); _xip_init_data_start = .; __init_begin = .; .init.data : { INIT_DATA } _xip_init_data_end = .; __init_end = .; BSS_SECTION(0, 8192, 0) _xip_end = .; #undef LOAD_OFFSET #endif DWARF_DEBUG .xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) } .xt.insn 0 : { KEEP(*(.xt.insn .xt.insn.* .gnu.linkonce.x*)) } .xt.lit 0 : { KEEP(*(.xt.lit .xt.lit.* .gnu.linkonce.p*)) } /* Sections to be discarded */ DISCARDS }
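Each RELOCATE_ENTRY above emits three words per section — destination start, destination end, and the boot-time load address — between __boot_reloc_table_start and __boot_reloc_table_end. A C sketch of the copy loop that head.S performs over this table at startup; illustrative only (the struct and function names are invented, and the real loop also takes care of cache writeback and invalidation):

#include <string.h>

struct reloc_entry {
	unsigned long start;	/* sym##_start: run-time address  */
	unsigned long end;	/* sym##_end                      */
	unsigned long load;	/* LOADADDR(section): copy source */
};

extern struct reloc_entry __boot_reloc_table_start[];
extern struct reloc_entry __boot_reloc_table_end[];

static void relocate_vectors(void)
{
	struct reloc_entry *r;

	/* Move every vector to its final position before it is used. */
	for (r = __boot_reloc_table_start; r < __boot_reloc_table_end; r++)
		memcpy((void *)r->start, (const void *)r->load,
		       r->end - r->start);
}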
aixcc-public/challenge-001-exemplar-source
12,272
arch/xtensa/kernel/align.S
/* * arch/xtensa/kernel/align.S * * Handle unalignment exceptions in kernel space. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Copyright (C) 2001 - 2005 Tensilica, Inc. * Copyright (C) 2014 Cadence Design Systems Inc. * * Rewritten by Chris Zankel <chris@zankel.net> * * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca> */ #include <linux/linkage.h> #include <asm/current.h> #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/processor.h> #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION /* First-level exception handler for unaligned exceptions. * * Note: This handler works only for kernel exceptions. Unaligned user * access should get a seg fault. */ /* Big and little endian 16-bit values are located in * different halves of a register. HWORD_START helps to * abstract the notion of extracting a 16-bit value from a * register. * We also have to define new shifting instructions because * lsb and msb are on 'opposite' ends in a register for * different endian machines. * * Assume a memory region in ascending address: * 0 1 2 3|4 5 6 7 * * When loading one word into a register, the content of that register is: * LE 3 2 1 0, 7 6 5 4 * BE 0 1 2 3, 4 5 6 7 * * Masking the bits of the higher/lower address means: * LE X X 0 0, 0 0 X X * BE 0 0 X X, X X 0 0 * * Shifting to higher/lower addresses, means: * LE shift left / shift right * BE shift right / shift left * * Extracting 16 bits from a 32 bit reg. value to higher/lower address means: * LE mask 0 0 X X / shift left * BE shift left / mask 0 0 X X */ #if XCHAL_HAVE_WINDOWED #define UNALIGNED_USER_EXCEPTION #endif #if XCHAL_HAVE_BE #define HWORD_START 16 #define INSN_OP0 28 #define INSN_T 24 #define INSN_OP1 16 .macro __ssa8r r; ssa8l \r; .endm .macro __sh r, s; srl \r, \s; .endm .macro __sl r, s; sll \r, \s; .endm .macro __exth r, s; extui \r, \s, 0, 16; .endm .macro __extl r, s; slli \r, \s, 16; .endm #else #define HWORD_START 0 #define INSN_OP0 0 #define INSN_T 4 #define INSN_OP1 12 .macro __ssa8r r; ssa8b \r; .endm .macro __sh r, s; sll \r, \s; .endm .macro __sl r, s; srl \r, \s; .endm .macro __exth r, s; slli \r, \s, 16; .endm .macro __extl r, s; extui \r, \s, 0, 16; .endm #endif /* * xxxx xxxx = imm8 field * yyyy = imm4 field * ssss = s field * tttt = t field * * 16 0 * ------------------- * L32I.N yyyy ssss tttt 1000 * S32I.N yyyy ssss tttt 1001 * * 23 0 * ----------------------------- * res 0000 0010 * L16UI xxxx xxxx 0001 ssss tttt 0010 * L32I xxxx xxxx 0010 ssss tttt 0010 * XXX 0011 ssss tttt 0010 * XXX 0100 ssss tttt 0010 * S16I xxxx xxxx 0101 ssss tttt 0010 * S32I xxxx xxxx 0110 ssss tttt 0010 * XXX 0111 ssss tttt 0010 * XXX 1000 ssss tttt 0010 * L16SI xxxx xxxx 1001 ssss tttt 0010 * XXX 1010 0010 * **L32AI xxxx xxxx 1011 ssss tttt 0010 unsupported * XXX 1100 0010 * XXX 1101 0010 * XXX 1110 0010 * **S32RI xxxx xxxx 1111 ssss tttt 0010 unsupported * ----------------------------- * ^ ^ ^ * sub-opcode (NIBBLE_R) -+ | | * t field (NIBBLE_T) -----------+ | * major opcode (NIBBLE_OP0) --------------+ */ #define OP0_L32I_N 0x8 /* load immediate narrow */ #define OP0_S32I_N 0x9 /* store immediate narrow */ #define OP1_SI_MASK 0x4 /* OP1 bit set for stores */ #define OP1_SI_BIT 2 /* OP1 bit number for stores */ #define OP1_L32I 0x2 #define OP1_L16UI 0x1 #define OP1_L16SI 0x9 #define OP1_L32AI 0xb 
#define OP1_S32I 0x6 #define OP1_S16I 0x5 #define OP1_S32RI 0xf /* * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ .literal_position ENTRY(fast_unaligned) /* Note: We don't expect the address to be aligned on a word * boundary. After all, the processor generated that exception * and it would be a hardware fault. */ /* Save some working register */ s32i a4, a2, PT_AREG4 s32i a5, a2, PT_AREG5 s32i a6, a2, PT_AREG6 s32i a7, a2, PT_AREG7 s32i a8, a2, PT_AREG8 rsr a0, depc s32i a0, a2, PT_AREG2 s32i a3, a2, PT_AREG3 rsr a3, excsave1 movi a4, fast_unaligned_fixup s32i a4, a3, EXC_TABLE_FIXUP /* Keep value of SAR in a0 */ rsr a0, sar rsr a8, excvaddr # load unaligned memory address /* Now, identify one of the following load/store instructions. * * The only possible danger of a double exception on the * following l32i instructions is kernel code in vmalloc * memory. The processor was just executing at the EPC_1 * address, and indeed, already fetched the instruction. That * guarantees a TLB mapping, which hasn't been replaced by * this unaligned exception handler that uses only static TLB * mappings. However, high-level interrupt handlers might * modify TLB entries, so for the generic case, we register a * TABLE_FIXUP handler here, too. */ /* a3...a6 saved on stack, a2 = SP */ /* Extract the instruction that caused the unaligned access. */ rsr a7, epc1 # load exception address movi a3, ~3 and a3, a3, a7 # mask lower bits l32i a4, a3, 0 # load 2 words l32i a5, a3, 4 __ssa8 a7 __src_b a4, a4, a5 # a4 has the instruction /* Analyze the instruction (load or store?). */ extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble #if XCHAL_HAVE_DENSITY _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump addi a6, a5, -OP0_S32I_N _beqz a6, .Lstore # S32I.N, do a store #endif /* 'store indicator bit' not set, jump */ _bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload /* Store: Jump to table entry to get the value in the source register.*/ .Lstore:movi a5, .Lstore_table # table extui a6, a4, INSN_T, 4 # get source register addx8 a5, a6, a5 jx a5 # jump into table /* Load: Load memory address. */ .Lload: movi a3, ~3 and a3, a3, a8 # align memory address __ssa8 a8 #ifdef UNALIGNED_USER_EXCEPTION addi a3, a3, 8 l32e a5, a3, -8 l32e a6, a3, -4 #else l32i a5, a3, 0 l32i a6, a3, 4 #endif __src_b a3, a5, a6 # a3 has the data word #if XCHAL_HAVE_DENSITY addi a7, a7, 2 # increment PC (assume 16-bit insn) extui a5, a4, INSN_OP0, 4 _beqi a5, OP0_L32I_N, 1f # l32i.n: jump addi a7, a7, 1 #else addi a7, a7, 3 #endif extui a5, a4, INSN_OP1, 4 _beqi a5, OP1_L32I, 1f # l32i: jump extui a3, a3, 0, 16 # extract lower 16 bits _beqi a5, OP1_L16UI, 1f addi a5, a5, -OP1_L16SI _bnez a5, .Linvalid_instruction_load /* sign extend value */ slli a3, a3, 16 srai a3, a3, 16 /* Set target register. */ 1: extui a4, a4, INSN_T, 4 # extract target register movi a5, .Lload_table addx8 a4, a4, a5 jx a4 # jump to entry for target register .align 8 .Lload_table: s32i a3, a2, PT_AREG0; _j .Lexit; .align 8 mov a1, a3; _j .Lexit; .align 8 # fishy?? 
s32i a3, a2, PT_AREG2; _j .Lexit; .align 8 s32i a3, a2, PT_AREG3; _j .Lexit; .align 8 s32i a3, a2, PT_AREG4; _j .Lexit; .align 8 s32i a3, a2, PT_AREG5; _j .Lexit; .align 8 s32i a3, a2, PT_AREG6; _j .Lexit; .align 8 s32i a3, a2, PT_AREG7; _j .Lexit; .align 8 s32i a3, a2, PT_AREG8; _j .Lexit; .align 8 mov a9, a3 ; _j .Lexit; .align 8 mov a10, a3 ; _j .Lexit; .align 8 mov a11, a3 ; _j .Lexit; .align 8 mov a12, a3 ; _j .Lexit; .align 8 mov a13, a3 ; _j .Lexit; .align 8 mov a14, a3 ; _j .Lexit; .align 8 mov a15, a3 ; _j .Lexit; .align 8 .Lstore_table: l32i a3, a2, PT_AREG0; _j 1f; .align 8 mov a3, a1; _j 1f; .align 8 # fishy?? l32i a3, a2, PT_AREG2; _j 1f; .align 8 l32i a3, a2, PT_AREG3; _j 1f; .align 8 l32i a3, a2, PT_AREG4; _j 1f; .align 8 l32i a3, a2, PT_AREG5; _j 1f; .align 8 l32i a3, a2, PT_AREG6; _j 1f; .align 8 l32i a3, a2, PT_AREG7; _j 1f; .align 8 l32i a3, a2, PT_AREG8; _j 1f; .align 8 mov a3, a9 ; _j 1f; .align 8 mov a3, a10 ; _j 1f; .align 8 mov a3, a11 ; _j 1f; .align 8 mov a3, a12 ; _j 1f; .align 8 mov a3, a13 ; _j 1f; .align 8 mov a3, a14 ; _j 1f; .align 8 mov a3, a15 ; _j 1f; .align 8 /* We cannot handle this exception. */ .extern _kernel_exception .Linvalid_instruction_load: .Linvalid_instruction_store: movi a4, 0 rsr a3, excsave1 s32i a4, a3, EXC_TABLE_FIXUP /* Restore a4...a8 and SAR, set SP, and jump to default exception. */ l32i a8, a2, PT_AREG8 l32i a7, a2, PT_AREG7 l32i a6, a2, PT_AREG6 l32i a5, a2, PT_AREG5 l32i a4, a2, PT_AREG4 wsr a0, sar mov a1, a2 rsr a0, ps bbsi.l a0, PS_UM_BIT, 2f # jump if user mode movi a0, _kernel_exception jx a0 2: movi a0, _user_exception jx a0 1: # a7: instruction pointer, a4: instruction, a3: value movi a6, 0 # mask: ffffffff:00000000 #if XCHAL_HAVE_DENSITY addi a7, a7, 2 # incr. PC,assume 16-bit instruction extui a5, a4, INSN_OP0, 4 # extract OP0 addi a5, a5, -OP0_S32I_N _beqz a5, 1f # s32i.n: jump addi a7, a7, 1 # increment PC, 32-bit instruction #else addi a7, a7, 3 # increment PC, 32-bit instruction #endif extui a5, a4, INSN_OP1, 4 # extract OP1 _beqi a5, OP1_S32I, 1f # jump if 32 bit store _bnei a5, OP1_S16I, .Linvalid_instruction_store movi a5, -1 __extl a3, a3 # get 16-bit value __exth a6, a5 # get 16-bit mask ffffffff:ffff0000 /* Get memory address */ 1: movi a4, ~3 and a4, a4, a8 # align memory address /* Insert value into memory */ movi a5, -1 # mask: ffffffff:XXXX0000 #ifdef UNALIGNED_USER_EXCEPTION addi a4, a4, 8 #endif __ssa8r a8 __src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE) __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE) #ifdef UNALIGNED_USER_EXCEPTION l32e a5, a4, -8 #else l32i a5, a4, 0 # load lower address word #endif and a5, a5, a8 # mask __sh a8, a3 # shift value or a5, a5, a8 # or with original value #ifdef UNALIGNED_USER_EXCEPTION s32e a5, a4, -8 l32e a8, a4, -4 #else s32i a5, a4, 0 # store l32i a8, a4, 4 # same for upper address word #endif __sl a5, a3 and a6, a8, a6 or a6, a6, a5 #ifdef UNALIGNED_USER_EXCEPTION s32e a6, a4, -4 #else s32i a6, a4, 4 #endif .Lexit: #if XCHAL_HAVE_LOOPS rsr a4, lend # check if we reached LEND bne a7, a4, 1f rsr a4, lcount # and LCOUNT != 0 beqz a4, 1f addi a4, a4, -1 # decrement LCOUNT and set rsr a7, lbeg # set PC to LBEGIN wsr a4, lcount #endif 1: wsr a7, epc1 # skip emulated instruction /* Update icount if we're single-stepping in userspace. 
*/ rsr a4, icountlevel beqz a4, 1f bgeui a4, LOCKLEVEL + 1, 1f rsr a4, icount addi a4, a4, 1 wsr a4, icount 1: movi a4, 0 rsr a3, excsave1 s32i a4, a3, EXC_TABLE_FIXUP /* Restore working register */ l32i a8, a2, PT_AREG8 l32i a7, a2, PT_AREG7 l32i a6, a2, PT_AREG6 l32i a5, a2, PT_AREG5 l32i a4, a2, PT_AREG4 l32i a3, a2, PT_AREG3 /* restore SAR and return */ wsr a0, sar l32i a0, a2, PT_AREG0 l32i a2, a2, PT_AREG2 rfe ENDPROC(fast_unaligned) ENTRY(fast_unaligned_fixup) l32i a2, a3, EXC_TABLE_DOUBLE_SAVE wsr a3, excsave1 l32i a8, a2, PT_AREG8 l32i a7, a2, PT_AREG7 l32i a6, a2, PT_AREG6 l32i a5, a2, PT_AREG5 l32i a4, a2, PT_AREG4 l32i a0, a2, PT_AREG2 xsr a0, depc # restore depc and a0 wsr a0, sar rsr a0, exccause s32i a0, a2, PT_DEPC # mark as a regular exception rsr a0, ps bbsi.l a0, PS_UM_BIT, 1f # jump if user mode rsr a0, exccause addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler l32i a3, a2, PT_AREG3 jx a0 1: rsr a0, exccause addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_USER # load handler l32i a3, a2, PT_AREG3 jx a0 ENDPROC(fast_unaligned_fixup) #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
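The handler above never loads the unaligned word directly: it fetches the two aligned words that straddle the faulting address and funnel-shifts them by the byte offset (__ssa8 plus __src_b). A minimal C model of the little-endian load path, illustrative only and not part of the kernel source:

#include <stdint.h>

/* Illustrative little-endian model of the load emulation: fetch the
 * two aligned words around the address and funnel-shift them by the
 * byte offset, as __ssa8 + __src_b do above. */
static uint32_t emulate_unaligned_l32(uintptr_t addr)
{
        const uint32_t *base = (const uint32_t *)(addr & ~(uintptr_t)3);
        unsigned shift = (addr & 3) * 8;        /* bit offset of the access */
        uint32_t lo = base[0];

        if (shift == 0)
                return lo;                      /* was actually aligned */
        return (lo >> shift) | (base[1] << (32 - shift));
}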
aixcc-public/challenge-001-exemplar-source
3,724
arch/xtensa/lib/strnlen_user.S
/* * arch/xtensa/lib/strnlen_user.S * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Returns strnlen, including trailing zero terminator. * Zero indicates error. * * Copyright (C) 2002 Tensilica Inc. */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> /* * size_t __strnlen_user(const char *s, size_t len) */ #ifdef __XTENSA_EB__ # define MASK0 0xff000000 # define MASK1 0x00ff0000 # define MASK2 0x0000ff00 # define MASK3 0x000000ff #else # define MASK0 0x000000ff # define MASK1 0x0000ff00 # define MASK2 0x00ff0000 # define MASK3 0xff000000 #endif # Register use: # a2/ src # a3/ len # a4/ tmp # a5/ mask0 # a6/ mask1 # a7/ mask2 # a8/ mask3 # a9/ tmp # a10/ tmp .text ENTRY(__strnlen_user) abi_entry_default # a2/ s, a3/ len addi a4, a2, -4 # because we overincrement at the end; # we compensate with load offsets of 4 movi a5, MASK0 # mask for byte 0 movi a6, MASK1 # mask for byte 1 movi a7, MASK2 # mask for byte 2 movi a8, MASK3 # mask for byte 3 bbsi.l a2, 0, .L1mod2 # if only 8-bit aligned bbsi.l a2, 1, .L2mod4 # if only 16-bit aligned /* * String is word-aligned. */ .Laligned: srli a10, a3, 2 # number of loop iterations with 4B per loop #if XCHAL_HAVE_LOOPS loopnez a10, .Ldone #else beqz a10, .Ldone slli a10, a10, 2 add a10, a10, a4 # a10 = end of last 4B chunk #endif /* XCHAL_HAVE_LOOPS */ .Loop: EX(10f) l32i a9, a4, 4 # get next word of string addi a4, a4, 4 # advance string pointer bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero bnone a9, a7, .Lz2 # if byte 2 is zero bnone a9, a8, .Lz3 # if byte 3 is zero #if !XCHAL_HAVE_LOOPS blt a4, a10, .Loop #endif .Ldone: EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks bbci.l a3, 1, .L100 # check two more bytes (bytes 0, 1 of word) addi a4, a4, 2 # advance string pointer bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero .L100: bbci.l a3, 0, .L101 # check one more byte (byte 2 of word) # Actually, we don't need to check. Zero or nonzero, we'll add one. # Do not add an extra one for the NULL terminator since we have # exhausted the original len parameter. addi a4, a4, 1 # advance string pointer .L101: sub a2, a4, a2 # compute length abi_ret_default # NOTE that in several places below, we point to the byte just after # the zero byte in order to include the NULL terminator in the count. 
.Lz3: # byte 3 is zero addi a4, a4, 3 # point to zero byte .Lz0: # byte 0 is zero addi a4, a4, 1 # point just beyond zero byte sub a2, a4, a2 # subtract to get length abi_ret_default .Lz1: # byte 1 is zero addi a4, a4, 1+1 # point just beyond zero byte sub a2, a4, a2 # subtract to get length abi_ret_default .Lz2: # byte 2 is zero addi a4, a4, 2+1 # point just beyond zero byte sub a2, a4, a2 # subtract to get length abi_ret_default .L1mod2: # address is odd EX(10f) l8ui a9, a4, 4 # get byte 0 addi a4, a4, 1 # advance string pointer beqz a9, .Lz3 # if byte 0 is zero bbci.l a4, 1, .Laligned # if string pointer is now word-aligned .L2mod4: # address is 2 mod 4 addi a4, a4, 2 # advance ptr for aligned access EX(10f) l32i a9, a4, 0 # get word with first two bytes of string bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero # byte 3 is zero addi a4, a4, 3+1 # point just beyond zero byte sub a2, a4, a2 # subtract to get length abi_ret_default ENDPROC(__strnlen_user) .section .fixup, "ax" .align 4 10: movi a2, 0 abi_ret_default
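The four MASK constants are what make the loop word-at-a-time: each bnone test probes one byte lane of the loaded word for zero. The same scan in C, as a sketch that assumes a 4-byte-aligned pointer and omits the exact len bookkeeping and the user-access fixups:

#include <stddef.h>
#include <stdint.h>

/* Word-at-a-time NUL scan mirroring the bnone tests above.
 * Little-endian mask order; returns the count including the NUL,
 * matching the routine's "including trailing zero" contract. */
static size_t strnlen_wordwise(const char *s, size_t nwords)
{
        const uint32_t *w = (const uint32_t *)s;        /* assumes alignment */
        const uint32_t mask[4] = {
                0x000000ff, 0x0000ff00, 0x00ff0000, 0xff000000
        };

        for (size_t i = 0; i < nwords; i++)
                for (unsigned b = 0; b < 4; b++)
                        if ((w[i] & mask[b]) == 0)
                                return i * 4 + b + 1;   /* count includes NUL */
        return 0;                                       /* no NUL found */
}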
aixcc-public/challenge-001-exemplar-source
1,294
arch/xtensa/lib/umodsi3.S
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> ENTRY(__umodsi3) abi_entry_default #if XCHAL_HAVE_DIV32 remu a2, a2, a3 #else bltui a3, 2, .Lle_one /* check if the divisor is <= 1 */ do_nsau a5, a2, a6, a7 /* dividend_shift = nsau (dividend) */ do_nsau a4, a3, a6, a7 /* divisor_shift = nsau (divisor) */ bgeu a5, a4, .Lspecial sub a4, a4, a5 /* count = divisor_shift - dividend_shift */ ssl a4 sll a3, a3 /* divisor <<= count */ /* test-subtract-and-shift loop */ #if XCHAL_HAVE_LOOPS loopnez a4, .Lloopend #endif /* XCHAL_HAVE_LOOPS */ .Lloop: bltu a2, a3, .Lzerobit sub a2, a2, a3 .Lzerobit: srli a3, a3, 1 #if !XCHAL_HAVE_LOOPS addi a4, a4, -1 bnez a4, .Lloop #endif /* !XCHAL_HAVE_LOOPS */ .Lloopend: .Lspecial: bltu a2, a3, .Lreturn sub a2, a2, a3 /* subtract once more if dividend >= divisor */ .Lreturn: abi_ret_default .Lle_one: bnez a3, .Lreturn0 /* Divide by zero: Use an illegal instruction to force an exception. The subsequent "DIV0" string can be recognized by the exception handler to identify the real cause of the exception. */ ill .ascii "DIV0" .Lreturn0: movi a2, 0 #endif /* XCHAL_HAVE_DIV32 */ abi_ret_default ENDPROC(__umodsi3)
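With no divide hardware, the loop above aligns the divisor's leading bit under the dividend's (the nsau difference) and does one trial subtract per bit. A C rendering of that remainder loop; the divisor is assumed nonzero here, whereas the assembly deliberately traps with the "DIV0" marker:

#include <stdint.h>

/* Shift-and-subtract remainder, mirroring the loop above: one trial
 * subtract per bit position, plus the final ".Lspecial" subtract. */
static uint32_t umod32(uint32_t n, uint32_t d)
{
        if (n < d)
                return n;                       /* covers n == 0 too */

        int count = __builtin_clz(d) - __builtin_clz(n);

        d <<= count;                            /* align leading bits */
        for (int i = 0; i < count; i++) {
                if (n >= d)
                        n -= d;
                d >>= 1;
        }
        return n >= d ? n - d : n;              /* final conditional subtract */
}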
aixcc-public/challenge-001-exemplar-source
1,585
arch/xtensa/lib/udivsi3.S
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> ENTRY(__udivsi3) abi_entry_default #if XCHAL_HAVE_DIV32 quou a2, a2, a3 #else bltui a3, 2, .Lle_one /* check if the divisor <= 1 */ mov a6, a2 /* keep dividend in a6 */ do_nsau a5, a6, a2, a7 /* dividend_shift = nsau (dividend) */ do_nsau a4, a3, a2, a7 /* divisor_shift = nsau (divisor) */ bgeu a5, a4, .Lspecial sub a4, a4, a5 /* count = divisor_shift - dividend_shift */ ssl a4 sll a3, a3 /* divisor <<= count */ movi a2, 0 /* quotient = 0 */ /* test-subtract-and-shift loop; one quotient bit on each iteration */ #if XCHAL_HAVE_LOOPS loopnez a4, .Lloopend #endif /* XCHAL_HAVE_LOOPS */ .Lloop: bltu a6, a3, .Lzerobit sub a6, a6, a3 addi a2, a2, 1 .Lzerobit: slli a2, a2, 1 srli a3, a3, 1 #if !XCHAL_HAVE_LOOPS addi a4, a4, -1 bnez a4, .Lloop #endif /* !XCHAL_HAVE_LOOPS */ .Lloopend: bltu a6, a3, .Lreturn addi a2, a2, 1 /* increment quotient if dividend >= divisor */ .Lreturn: abi_ret_default .Lle_one: beqz a3, .Lerror /* if divisor == 1, return the dividend */ abi_ret_default .Lspecial: /* return dividend >= divisor */ bltu a6, a3, .Lreturn0 movi a2, 1 abi_ret_default .Lerror: /* Divide by zero: Use an illegal instruction to force an exception. The subsequent "DIV0" string can be recognized by the exception handler to identify the real cause of the exception. */ ill .ascii "DIV0" .Lreturn0: movi a2, 0 #endif /* XCHAL_HAVE_DIV32 */ abi_ret_default ENDPROC(__udivsi3)
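__udivsi3 runs the same test-subtract-and-shift loop but collects one quotient bit per pass, in exactly the asm's order (conditional subtract/increment first, then shift quotient and divisor). A sketch under the same nonzero-divisor assumption:

#include <stdint.h>

/* One quotient bit per trial subtract, same schedule as the asm:
 * subtract/increment, then shift q left and d right, with the
 * trailing increment matching the code after .Lloopend. */
static uint32_t udiv32(uint32_t n, uint32_t d)
{
        if (n < d)
                return 0;                       /* the .Lspecial early-out */

        int count = __builtin_clz(d) - __builtin_clz(n);
        uint32_t q = 0;

        d <<= count;
        for (int i = 0; i < count; i++) {
                if (n >= d) {
                        n -= d;
                        q += 1;
                }
                q <<= 1;
                d >>= 1;
        }
        return n >= d ? q + 1 : q;              /* trailing increment */
}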
aixcc-public/challenge-001-exemplar-source
1,912
arch/xtensa/lib/divsi3.S
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> ENTRY(__divsi3) abi_entry_default #if XCHAL_HAVE_DIV32 quos a2, a2, a3 #else xor a7, a2, a3 /* sign = dividend ^ divisor */ do_abs a6, a2, a4 /* udividend = abs (dividend) */ do_abs a3, a3, a4 /* udivisor = abs (divisor) */ bltui a3, 2, .Lle_one /* check if udivisor <= 1 */ do_nsau a5, a6, a2, a8 /* udividend_shift = nsau (udividend) */ do_nsau a4, a3, a2, a8 /* udivisor_shift = nsau (udivisor) */ bgeu a5, a4, .Lspecial sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */ ssl a4 sll a3, a3 /* udivisor <<= count */ movi a2, 0 /* quotient = 0 */ /* test-subtract-and-shift loop; one quotient bit on each iteration */ #if XCHAL_HAVE_LOOPS loopnez a4, .Lloopend #endif /* XCHAL_HAVE_LOOPS */ .Lloop: bltu a6, a3, .Lzerobit sub a6, a6, a3 addi a2, a2, 1 .Lzerobit: slli a2, a2, 1 srli a3, a3, 1 #if !XCHAL_HAVE_LOOPS addi a4, a4, -1 bnez a4, .Lloop #endif /* !XCHAL_HAVE_LOOPS */ .Lloopend: bltu a6, a3, .Lreturn addi a2, a2, 1 /* increment if udividend >= udivisor */ .Lreturn: neg a5, a2 movltz a2, a5, a7 /* return (sign < 0) ? -quotient : quotient */ abi_ret_default .Lle_one: beqz a3, .Lerror neg a2, a6 /* if udivisor == 1, then return... */ movgez a2, a6, a7 /* (sign < 0) ? -udividend : udividend */ abi_ret_default .Lspecial: bltu a6, a3, .Lreturn0 /* if dividend < divisor, return 0 */ movi a2, 1 movi a4, -1 movltz a2, a4, a7 /* else return (sign < 0) ? -1 : 1 */ abi_ret_default .Lerror: /* Divide by zero: Use an illegal instruction to force an exception. The subsequent "DIV0" string can be recognized by the exception handler to identify the real cause of the exception. */ ill .ascii "DIV0" .Lreturn0: movi a2, 0 #endif /* XCHAL_HAVE_DIV32 */ abi_ret_default ENDPROC(__divsi3)
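Signed division then costs only sign bookkeeping around the unsigned core: take absolute values, divide, and negate when dividend ^ divisor is negative (the movltz at .Lreturn). In C, reusing the udiv32 sketch shown after __udivsi3 above:

#include <stdint.h>

uint32_t udiv32(uint32_t n, uint32_t d);        /* the earlier sketch */

/* Sign handling as in __divsi3: the XOR's sign bit says whether the
 * operand signs differ, which is exactly what movltz tests on a7. */
static int32_t sdiv32(int32_t a, int32_t b)
{
        uint32_t ua = a < 0 ? 0u - (uint32_t)a : (uint32_t)a;
        uint32_t ub = b < 0 ? 0u - (uint32_t)b : (uint32_t)b;
        uint32_t uq = udiv32(ua, ub);           /* unsigned core */

        return ((a ^ b) < 0) ? -(int32_t)uq : (int32_t)uq;
}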
aixcc-public/challenge-001-exemplar-source
7,922
arch/xtensa/lib/checksum.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IP/TCP/UDP checksumming routines * * Xtensa version: Copyright (C) 2001 Tensilica, Inc. by Kevin Chea * Optimized by Joe Taylor */ #include <linux/errno.h> #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments */ /* * unsigned int csum_partial(const unsigned char *buf, int len, * unsigned int sum); * a2 = buf * a3 = len * a4 = sum * * This function assumes 2- or 4-byte alignment. Other alignments will fail! */ /* ONES_ADD converts twos-complement math to ones-complement. */ #define ONES_ADD(sum, val) \ add sum, sum, val ; \ bgeu sum, val, 99f ; \ addi sum, sum, 1 ; \ 99: ; .text ENTRY(csum_partial) /* * Experiments with Ethernet and SLIP connections show that buf * is aligned on either a 2-byte or 4-byte boundary. */ abi_entry_default extui a5, a2, 0, 2 bnez a5, 8f /* branch if 2-byte aligned */ /* Fall-through on common case, 4-byte alignment */ 1: srli a5, a3, 5 /* 32-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a5, 2f #else beqz a5, 2f slli a5, a5, 5 add a5, a5, a2 /* a5 = end of last 32-byte chunk */ .Loop1: #endif l32i a6, a2, 0 l32i a7, a2, 4 ONES_ADD(a4, a6) ONES_ADD(a4, a7) l32i a6, a2, 8 l32i a7, a2, 12 ONES_ADD(a4, a6) ONES_ADD(a4, a7) l32i a6, a2, 16 l32i a7, a2, 20 ONES_ADD(a4, a6) ONES_ADD(a4, a7) l32i a6, a2, 24 l32i a7, a2, 28 ONES_ADD(a4, a6) ONES_ADD(a4, a7) addi a2, a2, 4*8 #if !XCHAL_HAVE_LOOPS blt a2, a5, .Loop1 #endif 2: extui a5, a3, 2, 3 /* remaining 4-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a5, 3f #else beqz a5, 3f slli a5, a5, 2 add a5, a5, a2 /* a5 = end of last 4-byte chunk */ .Loop2: #endif l32i a6, a2, 0 ONES_ADD(a4, a6) addi a2, a2, 4 #if !XCHAL_HAVE_LOOPS blt a2, a5, .Loop2 #endif 3: _bbci.l a3, 1, 5f /* remaining 2-byte chunk */ l16ui a6, a2, 0 ONES_ADD(a4, a6) addi a2, a2, 2 5: _bbci.l a3, 0, 7f /* remaining 1-byte chunk */ 6: l8ui a6, a2, 0 #ifdef __XTENSA_EB__ slli a6, a6, 8 /* load byte into bits 8..15 */ #endif ONES_ADD(a4, a6) 7: mov a2, a4 abi_ret_default /* uncommon case, buf is 2-byte aligned */ 8: beqz a3, 7b /* branch if len == 0 */ beqi a3, 1, 6b /* branch if len == 1 */ extui a5, a2, 0, 1 bnez a5, 8f /* branch if 1-byte aligned */ l16ui a6, a2, 0 /* common case, len >= 2 */ ONES_ADD(a4, a6) addi a2, a2, 2 /* adjust buf */ addi a3, a3, -2 /* adjust len */ j 1b /* now buf is 4-byte aligned */ /* case: odd-byte aligned, len > 1 * This case is dog slow, so don't give us an odd address. * (I don't think this ever happens, but just in case.) */ 8: srli a5, a3, 2 /* 4-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a5, 2f #else beqz a5, 2f slli a5, a5, 2 add a5, a5, a2 /* a5 = end of last 4-byte chunk */ .Loop3: #endif l8ui a6, a2, 0 /* bits 24..31 */ l16ui a7, a2, 1 /* bits 8..23 */ l8ui a8, a2, 3 /* bits 0.. 
8 */ #ifdef __XTENSA_EB__ slli a6, a6, 24 #else slli a8, a8, 24 #endif slli a7, a7, 8 or a7, a7, a6 or a7, a7, a8 ONES_ADD(a4, a7) addi a2, a2, 4 #if !XCHAL_HAVE_LOOPS blt a2, a5, .Loop3 #endif 2: _bbci.l a3, 1, 3f /* remaining 2-byte chunk, still odd addr */ l8ui a6, a2, 0 l8ui a7, a2, 1 #ifdef __XTENSA_EB__ slli a6, a6, 8 #else slli a7, a7, 8 #endif or a7, a7, a6 ONES_ADD(a4, a7) addi a2, a2, 2 3: j 5b /* branch to handle the remaining byte */ ENDPROC(csum_partial) /* * Copy from ds while checksumming, otherwise like csum_partial */ /* unsigned int csum_partial_copy_generic (const char *src, char *dst, int len) a2 = src a3 = dst a4 = len a5 = sum a8 = temp a9 = temp a10 = temp This function is optimized for 4-byte aligned addresses. Other alignments work, but not nearly as efficiently. */ ENTRY(csum_partial_copy_generic) abi_entry_default movi a5, -1 or a10, a2, a3 /* We optimize the following alignment tests for the 4-byte aligned case. Two bbsi.l instructions might seem more optimal (commented out below). However, both labels 5: and 3: are out of the imm8 range, so the assembler relaxes them into equivalent bbci.l, j combinations, which is actually slower. */ extui a9, a10, 0, 2 beqz a9, 1f /* branch if both are 4-byte aligned */ bbsi.l a10, 0, 5f /* branch if one address is odd */ j 3f /* one address is 2-byte aligned */ /* _bbsi.l a10, 0, 5f */ /* branch if odd address */ /* _bbsi.l a10, 1, 3f */ /* branch if 2-byte-aligned address */ 1: /* src and dst are both 4-byte aligned */ srli a10, a4, 5 /* 32-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a10, 2f #else beqz a10, 2f slli a10, a10, 5 add a10, a10, a2 /* a10 = end of last 32-byte src chunk */ .Loop5: #endif EX(10f) l32i a9, a2, 0 EX(10f) l32i a8, a2, 4 EX(10f) s32i a9, a3, 0 EX(10f) s32i a8, a3, 4 ONES_ADD(a5, a9) ONES_ADD(a5, a8) EX(10f) l32i a9, a2, 8 EX(10f) l32i a8, a2, 12 EX(10f) s32i a9, a3, 8 EX(10f) s32i a8, a3, 12 ONES_ADD(a5, a9) ONES_ADD(a5, a8) EX(10f) l32i a9, a2, 16 EX(10f) l32i a8, a2, 20 EX(10f) s32i a9, a3, 16 EX(10f) s32i a8, a3, 20 ONES_ADD(a5, a9) ONES_ADD(a5, a8) EX(10f) l32i a9, a2, 24 EX(10f) l32i a8, a2, 28 EX(10f) s32i a9, a3, 24 EX(10f) s32i a8, a3, 28 ONES_ADD(a5, a9) ONES_ADD(a5, a8) addi a2, a2, 32 addi a3, a3, 32 #if !XCHAL_HAVE_LOOPS blt a2, a10, .Loop5 #endif 2: extui a10, a4, 2, 3 /* remaining 4-byte chunks */ extui a4, a4, 0, 2 /* reset len for general-case, 2-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a10, 3f #else beqz a10, 3f slli a10, a10, 2 add a10, a10, a2 /* a10 = end of last 4-byte src chunk */ .Loop6: #endif EX(10f) l32i a9, a2, 0 EX(10f) s32i a9, a3, 0 ONES_ADD(a5, a9) addi a2, a2, 4 addi a3, a3, 4 #if !XCHAL_HAVE_LOOPS blt a2, a10, .Loop6 #endif 3: /* Control comes to here in two cases: (1) It may fall through to here from the 4-byte alignment case to process, at most, one 2-byte chunk. (2) It branches to here from above if either src or dst is 2-byte aligned, and we process all bytes here, except for perhaps a trailing odd byte. It's inefficient, so align your addresses to 4-byte boundaries. a2 = src a3 = dst a4 = len a5 = sum */ srli a10, a4, 1 /* 2-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a10, 4f #else beqz a10, 4f slli a10, a10, 1 add a10, a10, a2 /* a10 = end of last 2-byte src chunk */ .Loop7: #endif EX(10f) l16ui a9, a2, 0 EX(10f) s16i a9, a3, 0 ONES_ADD(a5, a9) addi a2, a2, 2 addi a3, a3, 2 #if !XCHAL_HAVE_LOOPS blt a2, a10, .Loop7 #endif 4: /* This section processes a possible trailing odd byte. 
*/ _bbci.l a4, 0, 8f /* 1-byte chunk */ EX(10f) l8ui a9, a2, 0 EX(10f) s8i a9, a3, 0 #ifdef __XTENSA_EB__ slli a9, a9, 8 /* shift byte to bits 8..15 */ #endif ONES_ADD(a5, a9) 8: mov a2, a5 abi_ret_default 5: /* Control branch to here when either src or dst is odd. We process all bytes using 8-bit accesses. Grossly inefficient, so don't feed us an odd address. */ srli a10, a4, 1 /* handle in pairs for 16-bit csum */ #if XCHAL_HAVE_LOOPS loopgtz a10, 6f #else beqz a10, 6f slli a10, a10, 1 add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */ .Loop8: #endif EX(10f) l8ui a9, a2, 0 EX(10f) l8ui a8, a2, 1 EX(10f) s8i a9, a3, 0 EX(10f) s8i a8, a3, 1 #ifdef __XTENSA_EB__ slli a9, a9, 8 /* combine into a single 16-bit value */ #else /* for checksum computation */ slli a8, a8, 8 #endif or a9, a9, a8 ONES_ADD(a5, a9) addi a2, a2, 2 addi a3, a3, 2 #if !XCHAL_HAVE_LOOPS blt a2, a10, .Loop8 #endif 6: j 4b /* process the possible trailing odd byte */ ENDPROC(csum_partial_copy_generic) # Exception handler: .section .fixup, "ax" 10: movi a2, 0 abi_ret_default .previous
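ONES_ADD is the heart of the file: a plain add whose carry-out is folded back in, which keeps the running value a valid ones'-complement (Internet checksum) partial sum. A minimal C twin of the macro and of the aligned word loop only; the assembly additionally handles 2-byte, odd-address and trailing-byte cases:

#include <stddef.h>
#include <stdint.h>

/* End-around-carry add: the C counterpart of the ONES_ADD macro. */
static uint32_t ones_add(uint32_t sum, uint32_t val)
{
        sum += val;
        return sum < val ? sum + 1 : sum;       /* fold the carry back in */
}

/* Aligned whole-word portion of csum_partial, as a sketch. */
static uint32_t csum_words(const uint32_t *buf, size_t nwords, uint32_t sum)
{
        for (size_t i = 0; i < nwords; i++)
                sum = ones_add(sum, buf[i]);
        return sum;
}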
aixcc-public/challenge-001-exemplar-source
7,519
arch/xtensa/lib/usercopy.S
/* * arch/xtensa/lib/usercopy.S * * Copy to/from user space (derived from arch/xtensa/lib/hal/memcopy.S) * * DO NOT COMBINE this function with <arch/xtensa/lib/hal/memcopy.S>. * It needs to remain separate and distinct. The hal files are part * of the Xtensa link-time HAL, and those files may differ per * processor configuration. Patching the kernel for another * processor configuration includes replacing the hal files, and we * could lose the special functionality for accessing user-space * memory during such a patch. We sacrifice a little code space here * in favor to simplify code maintenance. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Copyright (C) 2002 Tensilica Inc. */ /* * size_t __xtensa_copy_user (void *dst, const void *src, size_t len); * * The returned value is the number of bytes not copied. Implies zero * is success. * * The general case algorithm is as follows: * If the destination and source are both aligned, * do 16B chunks with a loop, and then finish up with * 8B, 4B, 2B, and 1B copies conditional on the length. * If destination is aligned and source unaligned, * do the same, but use SRC to align the source data. * If destination is unaligned, align it by conditionally * copying 1B and 2B and then retest. * This code tries to use fall-through braches for the common * case of aligned destinations (except for the branches to * the alignment label). * * Register use: * a0/ return address * a1/ stack pointer * a2/ return value * a3/ src * a4/ length * a5/ dst * a6/ tmp * a7/ tmp * a8/ tmp * a9/ tmp * a10/ tmp * a11/ original length */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> .text ENTRY(__xtensa_copy_user) #if !XCHAL_HAVE_LOOPS && defined(__XTENSA_CALL0_ABI__) #define STACK_SIZE 4 #else #define STACK_SIZE 0 #endif abi_entry(STACK_SIZE) # a2/ dst, a3/ src, a4/ len mov a5, a2 # copy dst so that a2 is return value mov a11, a4 # preserve original len for error case .Lcommon: bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2 bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4 .Ldstaligned: # return here from .Ldstunaligned when dst is aligned srli a7, a4, 4 # number of loop iterations with 16B # per iteration movi a8, 3 # if source is also aligned, bnone a3, a8, .Laligned # then use word copy __ssa8 a3 # set shift amount from byte offset bnez a4, .Lsrcunaligned movi a2, 0 # return success for len==0 abi_ret(STACK_SIZE) /* * Destination is unaligned */ .Ldst1mod2: # dst is only byte aligned bltui a4, 7, .Lbytecopy # do short copies byte by byte # copy 1 byte EX(10f) l8ui a6, a3, 0 addi a3, a3, 1 EX(10f) s8i a6, a5, 0 addi a5, a5, 1 addi a4, a4, -1 bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then # return to main algorithm .Ldst2mod4: # dst 16-bit aligned # copy 2 bytes bltui a4, 6, .Lbytecopy # do short copies byte by byte EX(10f) l8ui a6, a3, 0 EX(10f) l8ui a7, a3, 1 addi a3, a3, 2 EX(10f) s8i a6, a5, 0 EX(10f) s8i a7, a5, 1 addi a5, a5, 2 addi a4, a4, -2 j .Ldstaligned # dst is now aligned, return to main algorithm /* * Byte by byte copy */ .align 4 .byte 0 # 1 mod 4 alignment for LOOPNEZ # (0 mod 4 alignment for LBEG) .Lbytecopy: #if XCHAL_HAVE_LOOPS loopnez a4, .Lbytecopydone #else /* !XCHAL_HAVE_LOOPS */ beqz a4, .Lbytecopydone add a7, a3, a4 # a7 = end address for source #endif /* !XCHAL_HAVE_LOOPS */ .Lnextbyte: EX(10f) l8ui a6, a3, 0 addi a3, a3, 1 EX(10f) s8i a6, a5, 0 addi a5, a5, 1 #if !XCHAL_HAVE_LOOPS blt a3, 
a7, .Lnextbyte #endif /* !XCHAL_HAVE_LOOPS */ .Lbytecopydone: movi a2, 0 # return success for len bytes copied abi_ret(STACK_SIZE) /* * Destination and source are word-aligned. */ # copy 16 bytes per iteration for word-aligned dst and word-aligned src .align 4 # 1 mod 4 alignment for LOOPNEZ .byte 0 # (0 mod 4 alignment for LBEG) .Laligned: #if XCHAL_HAVE_LOOPS loopnez a7, .Loop1done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop1done slli a8, a7, 4 add a8, a8, a3 # a8 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop1: EX(10f) l32i a6, a3, 0 EX(10f) l32i a7, a3, 4 EX(10f) s32i a6, a5, 0 EX(10f) l32i a6, a3, 8 EX(10f) s32i a7, a5, 4 EX(10f) l32i a7, a3, 12 EX(10f) s32i a6, a5, 8 addi a3, a3, 16 EX(10f) s32i a7, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS blt a3, a8, .Loop1 #endif /* !XCHAL_HAVE_LOOPS */ .Loop1done: bbci.l a4, 3, .L2 # copy 8 bytes EX(10f) l32i a6, a3, 0 EX(10f) l32i a7, a3, 4 addi a3, a3, 8 EX(10f) s32i a6, a5, 0 EX(10f) s32i a7, a5, 4 addi a5, a5, 8 .L2: bbci.l a4, 2, .L3 # copy 4 bytes EX(10f) l32i a6, a3, 0 addi a3, a3, 4 EX(10f) s32i a6, a5, 0 addi a5, a5, 4 .L3: bbci.l a4, 1, .L4 # copy 2 bytes EX(10f) l16ui a6, a3, 0 addi a3, a3, 2 EX(10f) s16i a6, a5, 0 addi a5, a5, 2 .L4: bbci.l a4, 0, .L5 # copy 1 byte EX(10f) l8ui a6, a3, 0 EX(10f) s8i a6, a5, 0 .L5: movi a2, 0 # return success for len bytes copied abi_ret(STACK_SIZE) /* * Destination is aligned, Source is unaligned */ .align 4 .byte 0 # 1 mod 4 alignement for LOOPNEZ # (0 mod 4 alignment for LBEG) .Lsrcunaligned: # copy 16 bytes per iteration for word-aligned dst and unaligned src and a10, a3, a8 # save unalignment offset for below sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware) EX(10f) l32i a6, a3, 0 # load first word #if XCHAL_HAVE_LOOPS loopnez a7, .Loop2done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop2done #if defined(__XTENSA_CALL0_ABI__) s32i a10, a1, 0 slli a10, a7, 4 add a10, a10, a3 # a10 = end of last 16B source chunk #else slli a12, a7, 4 add a12, a12, a3 # a12 = end of last 16B source chunk #endif #endif /* !XCHAL_HAVE_LOOPS */ .Loop2: EX(10f) l32i a7, a3, 4 EX(10f) l32i a8, a3, 8 __src_b a6, a6, a7 EX(10f) s32i a6, a5, 0 EX(10f) l32i a9, a3, 12 __src_b a7, a7, a8 EX(10f) s32i a7, a5, 4 EX(10f) l32i a6, a3, 16 __src_b a8, a8, a9 EX(10f) s32i a8, a5, 8 addi a3, a3, 16 __src_b a9, a9, a6 EX(10f) s32i a9, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS #if defined(__XTENSA_CALL0_ABI__) blt a3, a10, .Loop2 l32i a10, a1, 0 #else blt a3, a12, .Loop2 #endif #endif /* !XCHAL_HAVE_LOOPS */ .Loop2done: bbci.l a4, 3, .L12 # copy 8 bytes EX(10f) l32i a7, a3, 4 EX(10f) l32i a8, a3, 8 __src_b a6, a6, a7 EX(10f) s32i a6, a5, 0 addi a3, a3, 8 __src_b a7, a7, a8 EX(10f) s32i a7, a5, 4 addi a5, a5, 8 mov a6, a8 .L12: bbci.l a4, 2, .L13 # copy 4 bytes EX(10f) l32i a7, a3, 4 addi a3, a3, 4 __src_b a6, a6, a7 EX(10f) s32i a6, a5, 0 addi a5, a5, 4 mov a6, a7 .L13: add a3, a3, a10 # readjust a3 with correct misalignment bbci.l a4, 1, .L14 # copy 2 bytes EX(10f) l8ui a6, a3, 0 EX(10f) l8ui a7, a3, 1 addi a3, a3, 2 EX(10f) s8i a6, a5, 0 EX(10f) s8i a7, a5, 1 addi a5, a5, 2 .L14: bbci.l a4, 0, .L15 # copy 1 byte EX(10f) l8ui a6, a3, 0 EX(10f) s8i a6, a5, 0 .L15: movi a2, 0 # return success for len bytes copied abi_ret(STACK_SIZE) ENDPROC(__xtensa_copy_user) .section .fixup, "ax" .align 4 /* a2 = original dst; a5 = current dst; a11= original len * bytes_copied = a5 - a2 * retval = bytes_not_copied = original len - bytes_copied * retval = a11 - (a5 - a2) */ 10: sub a2, a5, a2 /* 
a2 <-- bytes copied */ sub a2, a11, a2 /* a2 <-- bytes not copied */ abi_ret(STACK_SIZE)
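The fixup arithmetic at the end is also the calling contract: the return value is original_len minus bytes copied, so zero means complete success. A hypothetical caller pattern (the wrapper name and the use of userspace errno.h are illustrative, not kernel API):

#include <errno.h>
#include <stddef.h>

size_t __xtensa_copy_user(void *dst, const void *src, size_t len);

/* Hypothetical wrapper showing how the "bytes not copied" return is
 * consumed; EFAULT here stands in for the kernel's <linux/errno.h>. */
static long copy_from_user_model(void *dst, const void *usrc, size_t len)
{
        size_t left = __xtensa_copy_user(dst, usrc, len);

        return left == 0 ? 0 : -EFAULT;         /* partial copy => fault */
}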
aixcc-public/challenge-001-exemplar-source
3,588
arch/xtensa/lib/memset.S
/* * arch/xtensa/lib/memset.S * * ANSI C standard library function memset * (Well, almost. .fixup code might return zero.) * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Copyright (C) 2002 Tensilica Inc. */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> /* * void *memset(void *dst, int c, size_t length) * * The algorithm is as follows: * Create a word with c in all byte positions * If the destination is aligned, * do 16B chucks with a loop, and then finish up with * 8B, 4B, 2B, and 1B stores conditional on the length. * If destination is unaligned, align it by conditionally * setting 1B and 2B and then go to aligned case. * This code tries to use fall-through branches for the common * case of an aligned destination (except for the branches to * the alignment labels). */ .text ENTRY(__memset) WEAK(memset) abi_entry_default # a2/ dst, a3/ c, a4/ length extui a3, a3, 0, 8 # mask to just 8 bits slli a7, a3, 8 # duplicate character in all bytes of word or a3, a3, a7 # ... slli a7, a3, 16 # ... or a3, a3, a7 # ... mov a5, a2 # copy dst so that a2 is return value movi a6, 3 # for alignment tests bany a2, a6, .Ldstunaligned # if dst is unaligned .L0: # return here from .Ldstunaligned when dst is aligned srli a7, a4, 4 # number of loop iterations with 16B # per iteration bnez a4, .Laligned abi_ret_default /* * Destination is word-aligned. */ # set 16 bytes per iteration for word-aligned dst .align 4 # 1 mod 4 alignment for LOOPNEZ .byte 0 # (0 mod 4 alignment for LBEG) .Laligned: #if XCHAL_HAVE_LOOPS loopnez a7, .Loop1done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop1done slli a6, a7, 4 add a6, a6, a5 # a6 = end of last 16B chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop1: EX(10f) s32i a3, a5, 0 EX(10f) s32i a3, a5, 4 EX(10f) s32i a3, a5, 8 EX(10f) s32i a3, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS blt a5, a6, .Loop1 #endif /* !XCHAL_HAVE_LOOPS */ .Loop1done: bbci.l a4, 3, .L2 # set 8 bytes EX(10f) s32i a3, a5, 0 EX(10f) s32i a3, a5, 4 addi a5, a5, 8 .L2: bbci.l a4, 2, .L3 # set 4 bytes EX(10f) s32i a3, a5, 0 addi a5, a5, 4 .L3: bbci.l a4, 1, .L4 # set 2 bytes EX(10f) s16i a3, a5, 0 addi a5, a5, 2 .L4: bbci.l a4, 0, .L5 # set 1 byte EX(10f) s8i a3, a5, 0 .L5: .Lret1: abi_ret_default /* * Destination is unaligned */ .Ldstunaligned: bltui a4, 8, .Lbyteset # do short copies byte by byte bbci.l a5, 0, .L20 # branch if dst alignment half-aligned # dst is only byte aligned # set 1 byte EX(10f) s8i a3, a5, 0 addi a5, a5, 1 addi a4, a4, -1 # now retest if dst aligned bbci.l a5, 1, .L0 # if now aligned, return to main algorithm .L20: # dst half-aligned # set 2 bytes EX(10f) s16i a3, a5, 0 addi a5, a5, 2 addi a4, a4, -2 j .L0 # dst is now aligned, return to main algorithm /* * Byte by byte set */ .align 4 .byte 0 # 1 mod 4 alignment for LOOPNEZ # (0 mod 4 alignment for LBEG) .Lbyteset: #if XCHAL_HAVE_LOOPS loopnez a4, .Lbytesetdone #else /* !XCHAL_HAVE_LOOPS */ beqz a4, .Lbytesetdone add a6, a5, a4 # a6 = ending address #endif /* !XCHAL_HAVE_LOOPS */ .Lbyteloop: EX(10f) s8i a3, a5, 0 addi a5, a5, 1 #if !XCHAL_HAVE_LOOPS blt a5, a6, .Lbyteloop #endif /* !XCHAL_HAVE_LOOPS */ .Lbytesetdone: abi_ret_default ENDPROC(__memset) .section .fixup, "ax" .align 4 /* We return zero if a failure occurred. */ 10: movi a2, 0 abi_ret_default
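The three-instruction prologue builds the fill pattern by replicating the low byte of c across the whole word; the same splat in C:

#include <stdint.h>

/* Byte splat exactly as in the prologue above: two shift/or steps
 * turn 0x000000cc into 0xcccccccc. */
static uint32_t splat_byte(uint8_t c)
{
        uint32_t w = c;

        w |= w << 8;
        w |= w << 16;
        return w;
}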
aixcc-public/challenge-001-exemplar-source
12,694
arch/xtensa/lib/memcopy.S
/* * arch/xtensa/lib/hal/memcopy.S -- Core HAL library functions * xthal_memcpy and xthal_bcopy * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2002 - 2012 Tensilica Inc. */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> /* * void *memcpy(void *dst, const void *src, size_t len); * * This function is intended to do the same thing as the standard * library function memcpy() for most cases. * However, where the source and/or destination references * an instruction RAM or ROM or a data RAM or ROM, that * source and/or destination will always be accessed with * 32-bit load and store instructions (as required for these * types of devices). * * !!!!!!! XTFIXME: * !!!!!!! Handling of IRAM/IROM has not yet * !!!!!!! been implemented. * * The (general case) algorithm is as follows: * If destination is unaligned, align it by conditionally * copying 1 and 2 bytes. * If source is aligned, * do 16 bytes with a loop, and then finish up with * 8, 4, 2, and 1 byte copies conditional on the length; * else (if source is unaligned), * do the same, but use SRC to align the source data. * This code tries to use fall-through branches for the common * case of aligned source and destination and multiple * of 4 (or 8) length. * * Register use: * a0/ return address * a1/ stack pointer * a2/ return value * a3/ src * a4/ length * a5/ dst * a6/ tmp * a7/ tmp * a8/ tmp * a9/ tmp * a10/ tmp * a11/ tmp */ .text /* * Byte by byte copy */ .align 4 .byte 0 # 1 mod 4 alignment for LOOPNEZ # (0 mod 4 alignment for LBEG) .Lbytecopy: #if XCHAL_HAVE_LOOPS loopnez a4, .Lbytecopydone #else /* !XCHAL_HAVE_LOOPS */ beqz a4, .Lbytecopydone add a7, a3, a4 # a7 = end address for source #endif /* !XCHAL_HAVE_LOOPS */ .Lnextbyte: l8ui a6, a3, 0 addi a3, a3, 1 s8i a6, a5, 0 addi a5, a5, 1 #if !XCHAL_HAVE_LOOPS bne a3, a7, .Lnextbyte # continue loop if $a3:src != $a7:src_end #endif /* !XCHAL_HAVE_LOOPS */ .Lbytecopydone: abi_ret_default /* * Destination is unaligned */ .align 4 .Ldst1mod2: # dst is only byte aligned _bltui a4, 7, .Lbytecopy # do short copies byte by byte # copy 1 byte l8ui a6, a3, 0 addi a3, a3, 1 addi a4, a4, -1 s8i a6, a5, 0 addi a5, a5, 1 _bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then # return to main algorithm .Ldst2mod4: # dst 16-bit aligned # copy 2 bytes _bltui a4, 6, .Lbytecopy # do short copies byte by byte l8ui a6, a3, 0 l8ui a7, a3, 1 addi a3, a3, 2 addi a4, a4, -2 s8i a6, a5, 0 s8i a7, a5, 1 addi a5, a5, 2 j .Ldstaligned # dst is now aligned, return to main algorithm ENTRY(__memcpy) WEAK(memcpy) abi_entry_default # a2/ dst, a3/ src, a4/ len mov a5, a2 # copy dst so that a2 is return value .Lcommon: _bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2 _bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4 .Ldstaligned: # return here from .Ldst?mod? once dst is aligned srli a7, a4, 4 # number of loop iterations with 16B # per iteration movi a8, 3 # if source is not aligned, _bany a3, a8, .Lsrcunaligned # then use shifting copy /* * Destination and source are word-aligned, use word copy. 
*/ # copy 16 bytes per iteration for word-aligned dst and word-aligned src #if XCHAL_HAVE_LOOPS loopnez a7, .Loop1done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop1done slli a8, a7, 4 add a8, a8, a3 # a8 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop1: l32i a6, a3, 0 l32i a7, a3, 4 s32i a6, a5, 0 l32i a6, a3, 8 s32i a7, a5, 4 l32i a7, a3, 12 s32i a6, a5, 8 addi a3, a3, 16 s32i a7, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS bne a3, a8, .Loop1 # continue loop if a3:src != a8:src_end #endif /* !XCHAL_HAVE_LOOPS */ .Loop1done: bbci.l a4, 3, .L2 # copy 8 bytes l32i a6, a3, 0 l32i a7, a3, 4 addi a3, a3, 8 s32i a6, a5, 0 s32i a7, a5, 4 addi a5, a5, 8 .L2: bbsi.l a4, 2, .L3 bbsi.l a4, 1, .L4 bbsi.l a4, 0, .L5 abi_ret_default .L3: # copy 4 bytes l32i a6, a3, 0 addi a3, a3, 4 s32i a6, a5, 0 addi a5, a5, 4 bbsi.l a4, 1, .L4 bbsi.l a4, 0, .L5 abi_ret_default .L4: # copy 2 bytes l16ui a6, a3, 0 addi a3, a3, 2 s16i a6, a5, 0 addi a5, a5, 2 bbsi.l a4, 0, .L5 abi_ret_default .L5: # copy 1 byte l8ui a6, a3, 0 s8i a6, a5, 0 abi_ret_default /* * Destination is aligned, Source is unaligned */ .align 4 .Lsrcunaligned: _beqz a4, .Ldone # avoid loading anything for zero-length copies # copy 16 bytes per iteration for word-aligned dst and unaligned src __ssa8 a3 # set shift amount from byte offset /* set to 1 when running on ISS (simulator) with the lint or ferret client, or 0 to save a few cycles */ #define SIM_CHECKS_ALIGNMENT 1 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT and a11, a3, a8 # save unalignment offset for below sub a3, a3, a11 # align a3 #endif l32i a6, a3, 0 # load first word #if XCHAL_HAVE_LOOPS loopnez a7, .Loop2done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop2done slli a10, a7, 4 add a10, a10, a3 # a10 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop2: l32i a7, a3, 4 l32i a8, a3, 8 __src_b a6, a6, a7 s32i a6, a5, 0 l32i a9, a3, 12 __src_b a7, a7, a8 s32i a7, a5, 4 l32i a6, a3, 16 __src_b a8, a8, a9 s32i a8, a5, 8 addi a3, a3, 16 __src_b a9, a9, a6 s32i a9, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS bne a3, a10, .Loop2 # continue loop if a3:src != a10:src_end #endif /* !XCHAL_HAVE_LOOPS */ .Loop2done: bbci.l a4, 3, .L12 # copy 8 bytes l32i a7, a3, 4 l32i a8, a3, 8 __src_b a6, a6, a7 s32i a6, a5, 0 addi a3, a3, 8 __src_b a7, a7, a8 s32i a7, a5, 4 addi a5, a5, 8 mov a6, a8 .L12: bbci.l a4, 2, .L13 # copy 4 bytes l32i a7, a3, 4 addi a3, a3, 4 __src_b a6, a6, a7 s32i a6, a5, 0 addi a5, a5, 4 mov a6, a7 .L13: #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT add a3, a3, a11 # readjust a3 with correct misalignment #endif bbsi.l a4, 1, .L14 bbsi.l a4, 0, .L15 .Ldone: abi_ret_default .L14: # copy 2 bytes l8ui a6, a3, 0 l8ui a7, a3, 1 addi a3, a3, 2 s8i a6, a5, 0 s8i a7, a5, 1 addi a5, a5, 2 bbsi.l a4, 0, .L15 abi_ret_default .L15: # copy 1 byte l8ui a6, a3, 0 s8i a6, a5, 0 abi_ret_default ENDPROC(__memcpy) /* * void bcopy(const void *src, void *dest, size_t n); */ ENTRY(bcopy) abi_entry_default # a2=src, a3=dst, a4=len mov a5, a3 mov a3, a2 mov a2, a5 j .Lmovecommon # go to common code for memmove+bcopy ENDPROC(bcopy) /* * void *memmove(void *dst, const void *src, size_t len); * * This function is intended to do the same thing as the standard * library function memmove() for most cases. * However, where the source and/or destination references * an instruction RAM or ROM or a data RAM or ROM, that * source and/or destination will always be accessed with * 32-bit load and store instructions (as required for these * types of devices). 
* * !!!!!!! XTFIXME: * !!!!!!! Handling of IRAM/IROM has not yet * !!!!!!! been implemented. * * The (general case) algorithm is as follows: * If end of source doesn't overlap destination then use memcpy. * Otherwise do memcpy backwards. * * Register use: * a0/ return address * a1/ stack pointer * a2/ return value * a3/ src * a4/ length * a5/ dst * a6/ tmp * a7/ tmp * a8/ tmp * a9/ tmp * a10/ tmp * a11/ tmp */ /* * Byte by byte copy */ .align 4 .byte 0 # 1 mod 4 alignment for LOOPNEZ # (0 mod 4 alignment for LBEG) .Lbackbytecopy: #if XCHAL_HAVE_LOOPS loopnez a4, .Lbackbytecopydone #else /* !XCHAL_HAVE_LOOPS */ beqz a4, .Lbackbytecopydone sub a7, a3, a4 # a7 = start address for source #endif /* !XCHAL_HAVE_LOOPS */ .Lbacknextbyte: addi a3, a3, -1 l8ui a6, a3, 0 addi a5, a5, -1 s8i a6, a5, 0 #if !XCHAL_HAVE_LOOPS bne a3, a7, .Lbacknextbyte # continue loop if # $a3:src != $a7:src_start #endif /* !XCHAL_HAVE_LOOPS */ .Lbackbytecopydone: abi_ret_default /* * Destination is unaligned */ .align 4 .Lbackdst1mod2: # dst is only byte aligned _bltui a4, 7, .Lbackbytecopy # do short copies byte by byte # copy 1 byte addi a3, a3, -1 l8ui a6, a3, 0 addi a5, a5, -1 s8i a6, a5, 0 addi a4, a4, -1 _bbci.l a5, 1, .Lbackdstaligned # if dst is now aligned, then # return to main algorithm .Lbackdst2mod4: # dst 16-bit aligned # copy 2 bytes _bltui a4, 6, .Lbackbytecopy # do short copies byte by byte addi a3, a3, -2 l8ui a6, a3, 0 l8ui a7, a3, 1 addi a5, a5, -2 s8i a6, a5, 0 s8i a7, a5, 1 addi a4, a4, -2 j .Lbackdstaligned # dst is now aligned, # return to main algorithm ENTRY(__memmove) WEAK(memmove) abi_entry_default # a2/ dst, a3/ src, a4/ len mov a5, a2 # copy dst so that a2 is return value .Lmovecommon: sub a6, a5, a3 bgeu a6, a4, .Lcommon add a5, a5, a4 add a3, a3, a4 _bbsi.l a5, 0, .Lbackdst1mod2 # if dst is 1 mod 2 _bbsi.l a5, 1, .Lbackdst2mod4 # if dst is 2 mod 4 .Lbackdstaligned: # return here from .Lbackdst?mod? once dst is aligned srli a7, a4, 4 # number of loop iterations with 16B # per iteration movi a8, 3 # if source is not aligned, _bany a3, a8, .Lbacksrcunaligned # then use shifting copy /* * Destination and source are word-aligned, use word copy. 
*/ # copy 16 bytes per iteration for word-aligned dst and word-aligned src #if XCHAL_HAVE_LOOPS loopnez a7, .LbackLoop1done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .LbackLoop1done slli a8, a7, 4 sub a8, a3, a8 # a8 = start of first 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .LbackLoop1: addi a3, a3, -16 l32i a7, a3, 12 l32i a6, a3, 8 addi a5, a5, -16 s32i a7, a5, 12 l32i a7, a3, 4 s32i a6, a5, 8 l32i a6, a3, 0 s32i a7, a5, 4 s32i a6, a5, 0 #if !XCHAL_HAVE_LOOPS bne a3, a8, .LbackLoop1 # continue loop if a3:src != a8:src_start #endif /* !XCHAL_HAVE_LOOPS */ .LbackLoop1done: bbci.l a4, 3, .Lback2 # copy 8 bytes addi a3, a3, -8 l32i a6, a3, 0 l32i a7, a3, 4 addi a5, a5, -8 s32i a6, a5, 0 s32i a7, a5, 4 .Lback2: bbsi.l a4, 2, .Lback3 bbsi.l a4, 1, .Lback4 bbsi.l a4, 0, .Lback5 abi_ret_default .Lback3: # copy 4 bytes addi a3, a3, -4 l32i a6, a3, 0 addi a5, a5, -4 s32i a6, a5, 0 bbsi.l a4, 1, .Lback4 bbsi.l a4, 0, .Lback5 abi_ret_default .Lback4: # copy 2 bytes addi a3, a3, -2 l16ui a6, a3, 0 addi a5, a5, -2 s16i a6, a5, 0 bbsi.l a4, 0, .Lback5 abi_ret_default .Lback5: # copy 1 byte addi a3, a3, -1 l8ui a6, a3, 0 addi a5, a5, -1 s8i a6, a5, 0 abi_ret_default /* * Destination is aligned, Source is unaligned */ .align 4 .Lbacksrcunaligned: _beqz a4, .Lbackdone # avoid loading anything for zero-length copies # copy 16 bytes per iteration for word-aligned dst and unaligned src __ssa8 a3 # set shift amount from byte offset #define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with * the lint or ferret client, or 0 * to save a few cycles */ #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT and a11, a3, a8 # save unalignment offset for below sub a3, a3, a11 # align a3 #endif l32i a6, a3, 0 # load first word #if XCHAL_HAVE_LOOPS loopnez a7, .LbackLoop2done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .LbackLoop2done slli a10, a7, 4 sub a10, a3, a10 # a10 = start of first 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .LbackLoop2: addi a3, a3, -16 l32i a7, a3, 12 l32i a8, a3, 8 addi a5, a5, -16 __src_b a6, a7, a6 s32i a6, a5, 12 l32i a9, a3, 4 __src_b a7, a8, a7 s32i a7, a5, 8 l32i a6, a3, 0 __src_b a8, a9, a8 s32i a8, a5, 4 __src_b a9, a6, a9 s32i a9, a5, 0 #if !XCHAL_HAVE_LOOPS bne a3, a10, .LbackLoop2 # continue loop if a3:src != a10:src_start #endif /* !XCHAL_HAVE_LOOPS */ .LbackLoop2done: bbci.l a4, 3, .Lback12 # copy 8 bytes addi a3, a3, -8 l32i a7, a3, 4 l32i a8, a3, 0 addi a5, a5, -8 __src_b a6, a7, a6 s32i a6, a5, 4 __src_b a7, a8, a7 s32i a7, a5, 0 mov a6, a8 .Lback12: bbci.l a4, 2, .Lback13 # copy 4 bytes addi a3, a3, -4 l32i a7, a3, 0 addi a5, a5, -4 __src_b a6, a7, a6 s32i a6, a5, 0 mov a6, a7 .Lback13: #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT add a3, a3, a11 # readjust a3 with correct misalignment #endif bbsi.l a4, 1, .Lback14 bbsi.l a4, 0, .Lback15 .Lbackdone: abi_ret_default .Lback14: # copy 2 bytes addi a3, a3, -2 l8ui a6, a3, 0 l8ui a7, a3, 1 addi a5, a5, -2 s8i a6, a5, 0 s8i a7, a5, 1 bbsi.l a4, 0, .Lback15 abi_ret_default .Lback15: # copy 1 byte addi a3, a3, -1 addi a5, a5, -1 l8ui a6, a3, 0 s8i a6, a5, 0 abi_ret_default ENDPROC(__memmove)
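__memmove picks its direction with a single unsigned compare: because dst - src wraps modulo 2^32, (dst - src) >= len holds exactly when a forward copy cannot overwrite source bytes that are still unread. A C model of the .Lmovecommon test; the uintptr_t casts stand in for the machine-level subtract, since raw pointer subtraction between unrelated objects is undefined in C:

#include <stddef.h>
#include <stdint.h>

/* One unsigned compare picks the copy direction, as in .Lmovecommon. */
static void *memmove_model(void *dst, const void *src, size_t len)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        if ((uintptr_t)d - (uintptr_t)s >= len) {
                for (size_t i = 0; i < len; i++)        /* forward copy */
                        d[i] = s[i];
        } else {
                for (size_t i = len; i > 0; i--)        /* backward copy */
                        d[i - 1] = s[i - 1];
        }
        return dst;
}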
aixcc-public/challenge-001-exemplar-source
5,425
arch/xtensa/lib/strncpy_user.S
/* * arch/xtensa/lib/strncpy_user.S * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Returns: -EFAULT if exception before terminator, N if the entire * buffer filled, else strlen. * * Copyright (C) 2002 Tensilica Inc. */ #include <linux/errno.h> #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> /* * char *__strncpy_user(char *dst, const char *src, size_t len) */ #ifdef __XTENSA_EB__ # define MASK0 0xff000000 # define MASK1 0x00ff0000 # define MASK2 0x0000ff00 # define MASK3 0x000000ff #else # define MASK0 0x000000ff # define MASK1 0x0000ff00 # define MASK2 0x00ff0000 # define MASK3 0xff000000 #endif # Register use # a0/ return address # a1/ stack pointer # a2/ return value # a3/ src # a4/ len # a5/ mask0 # a6/ mask1 # a7/ mask2 # a8/ mask3 # a9/ tmp # a10/ tmp # a11/ dst .text ENTRY(__strncpy_user) abi_entry_default # a2/ dst, a3/ src, a4/ len mov a11, a2 # leave dst in return value register beqz a4, .Lret # if len is zero movi a5, MASK0 # mask for byte 0 movi a6, MASK1 # mask for byte 1 movi a7, MASK2 # mask for byte 2 movi a8, MASK3 # mask for byte 3 bbsi.l a3, 0, .Lsrc1mod2 # if only 8-bit aligned bbsi.l a3, 1, .Lsrc2mod4 # if only 16-bit aligned .Lsrcaligned: # return here when src is word-aligned srli a10, a4, 2 # number of loop iterations with 4B per loop movi a9, 3 bnone a11, a9, .Laligned j .Ldstunaligned .Lsrc1mod2: # src address is odd EX(11f) l8ui a9, a3, 0 # get byte 0 addi a3, a3, 1 # advance src pointer EX(10f) s8i a9, a11, 0 # store byte 0 beqz a9, .Lret # if byte 0 is zero addi a11, a11, 1 # advance dst pointer addi a4, a4, -1 # decrement len beqz a4, .Lret # if len is zero bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned .Lsrc2mod4: # src address is 2 mod 4 EX(11f) l8ui a9, a3, 0 # get byte 0 /* 1-cycle interlock */ EX(10f) s8i a9, a11, 0 # store byte 0 beqz a9, .Lret # if byte 0 is zero addi a11, a11, 1 # advance dst pointer addi a4, a4, -1 # decrement len beqz a4, .Lret # if len is zero EX(11f) l8ui a9, a3, 1 # get byte 0 addi a3, a3, 2 # advance src pointer EX(10f) s8i a9, a11, 0 # store byte 0 beqz a9, .Lret # if byte 0 is zero addi a11, a11, 1 # advance dst pointer addi a4, a4, -1 # decrement len bnez a4, .Lsrcaligned # if len is nonzero .Lret: sub a2, a11, a2 # compute strlen abi_ret_default /* * dst is word-aligned, src is word-aligned */ .align 4 # 1 mod 4 alignment for LOOPNEZ .byte 0 # (0 mod 4 alignment for LBEG) .Laligned: #if XCHAL_HAVE_LOOPS loopnez a10, .Loop1done #else beqz a10, .Loop1done slli a10, a10, 2 add a10, a10, a11 # a10 = end of last 4B chunck #endif .Loop1: EX(11f) l32i a9, a3, 0 # get word from src addi a3, a3, 4 # advance src pointer bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero bnone a9, a7, .Lz2 # if byte 2 is zero EX(10f) s32i a9, a11, 0 # store word to dst bnone a9, a8, .Lz3 # if byte 3 is zero addi a11, a11, 4 # advance dst pointer #if !XCHAL_HAVE_LOOPS blt a11, a10, .Loop1 #endif .Loop1done: bbci.l a4, 1, .L100 # copy 2 bytes EX(11f) l16ui a9, a3, 0 addi a3, a3, 2 # advance src pointer #ifdef __XTENSA_EB__ bnone a9, a7, .Lz0 # if byte 2 is zero bnone a9, a8, .Lz1 # if byte 3 is zero #else bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero #endif EX(10f) s16i a9, a11, 0 addi a11, a11, 2 # advance dst pointer .L100: bbci.l a4, 0, .Lret EX(11f) l8ui a9, a3, 0 /* slot */ EX(10f) s8i a9, a11, 0 beqz a9, .Lret # if byte is zero 
addi a11, a11, 1-3 # advance dst ptr 1, but also cancel # the effect of adding 3 in .Lz3 code /* fall thru to .Lz3 and "retw" */ .Lz3: # byte 3 is zero addi a11, a11, 3 # advance dst pointer sub a2, a11, a2 # compute strlen abi_ret_default .Lz0: # byte 0 is zero #ifdef __XTENSA_EB__ movi a9, 0 #endif /* __XTENSA_EB__ */ EX(10f) s8i a9, a11, 0 sub a2, a11, a2 # compute strlen abi_ret_default .Lz1: # byte 1 is zero #ifdef __XTENSA_EB__ extui a9, a9, 16, 16 #endif /* __XTENSA_EB__ */ EX(10f) s16i a9, a11, 0 addi a11, a11, 1 # advance dst pointer sub a2, a11, a2 # compute strlen abi_ret_default .Lz2: # byte 2 is zero #ifdef __XTENSA_EB__ extui a9, a9, 16, 16 #endif /* __XTENSA_EB__ */ EX(10f) s16i a9, a11, 0 movi a9, 0 EX(10f) s8i a9, a11, 2 addi a11, a11, 2 # advance dst pointer sub a2, a11, a2 # compute strlen abi_ret_default .align 4 # 1 mod 4 alignment for LOOPNEZ .byte 0 # (0 mod 4 alignment for LBEG) .Ldstunaligned: /* * for now just use byte copy loop */ #if XCHAL_HAVE_LOOPS loopnez a4, .Lunalignedend #else beqz a4, .Lunalignedend add a10, a11, a4 # a10 = ending address #endif /* XCHAL_HAVE_LOOPS */ .Lnextbyte: EX(11f) l8ui a9, a3, 0 addi a3, a3, 1 EX(10f) s8i a9, a11, 0 beqz a9, .Lunalignedend addi a11, a11, 1 #if !XCHAL_HAVE_LOOPS blt a11, a10, .Lnextbyte #endif .Lunalignedend: sub a2, a11, a2 # compute strlen abi_ret_default ENDPROC(__strncpy_user) .section .fixup, "ax" .align 4 /* For now, just return -EFAULT. Future implementations might * like to clear remaining kernel space, like the fixup * implementation in memset(). Thus, we differentiate between * load/store fixups. */ 10: 11: movi a2, -EFAULT abi_ret_default
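Note the three-way return contract in the file header; the `char *` in the prototype comment looks stale, since the code returns a count. A hypothetical caller, with the signature inferred from that contract rather than quoted from a kernel header:

#include <errno.h>

long __strncpy_user(char *dst, const char *src, unsigned long len);

/* Sketch of consuming the three-way result described above. */
static long get_name_model(char *buf, const char *usrc, unsigned long len)
{
        long n = __strncpy_user(buf, usrc, len);

        if (n < 0)
                return -EFAULT;                 /* faulted in user space */
        if ((unsigned long)n == len)
                return -ENAMETOOLONG;           /* no NUL within len bytes */
        return n;                               /* strlen-style length */
}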
aixcc-public/challenge-001-exemplar-source
5,194
arch/xtensa/lib/umulsidi3.S
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> #if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16 #define XCHAL_NO_MUL 1 #endif ENTRY(__umulsidi3) #ifdef __XTENSA_CALL0_ABI__ abi_entry(32) s32i a12, sp, 16 s32i a13, sp, 20 s32i a14, sp, 24 s32i a15, sp, 28 #elif XCHAL_NO_MUL /* This is not really a leaf function; allocate enough stack space to allow CALL12s to a helper function. */ abi_entry(32) #else abi_entry_default #endif #ifdef __XTENSA_EB__ #define wh a2 #define wl a3 #else #define wh a3 #define wl a2 #endif /* __XTENSA_EB__ */ /* This code is taken from the mulsf3 routine in ieee754-sf.S. See more comments there. */ #if XCHAL_HAVE_MUL32_HIGH mull a6, a2, a3 muluh wh, a2, a3 mov wl, a6 #else /* ! MUL32_HIGH */ #if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL /* a0 and a8 will be clobbered by calling the multiply function but a8 is not used here and need not be saved. */ s32i a0, sp, 0 #endif #if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 #define a2h a4 #define a3h a5 /* Get the high halves of the inputs into registers. */ srli a2h, a2, 16 srli a3h, a3, 16 #define a2l a2 #define a3l a3 #if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16 /* Clear the high halves of the inputs. This does not matter for MUL16 because the high bits are ignored. */ extui a2, a2, 0, 16 extui a3, a3, 0, 16 #endif #endif /* MUL16 || MUL32 */ #if XCHAL_HAVE_MUL16 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ mul16u dst, xreg ## xhalf, yreg ## yhalf #elif XCHAL_HAVE_MUL32 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ mull dst, xreg ## xhalf, yreg ## yhalf #elif XCHAL_HAVE_MAC16 /* The preprocessor insists on inserting a space when concatenating after a period in the definition of do_mul below. These macros are a workaround using underscores instead of periods when doing the concatenation. */ #define umul_aa_ll umul.aa.ll #define umul_aa_lh umul.aa.lh #define umul_aa_hl umul.aa.hl #define umul_aa_hh umul.aa.hh #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ umul_aa_ ## xhalf ## yhalf xreg, yreg; \ rsr dst, ACCLO #else /* no multiply hardware */ #define set_arg_l(dst, src) \ extui dst, src, 0, 16 #define set_arg_h(dst, src) \ srli dst, src, 16 #ifdef __XTENSA_CALL0_ABI__ #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ set_arg_ ## xhalf (a13, xreg); \ set_arg_ ## yhalf (a14, yreg); \ call0 .Lmul_mulsi3; \ mov dst, a12 #else #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ set_arg_ ## xhalf (a14, xreg); \ set_arg_ ## yhalf (a15, yreg); \ call12 .Lmul_mulsi3; \ mov dst, a14 #endif /* __XTENSA_CALL0_ABI__ */ #endif /* no multiply hardware */ /* Add pp1 and pp2 into a6 with carry-out in a9. */ do_mul(a6, a2, l, a3, h) /* pp 1 */ do_mul(a11, a2, h, a3, l) /* pp 2 */ movi a9, 0 add a6, a6, a11 bgeu a6, a11, 1f addi a9, a9, 1 1: /* Shift the high half of a9/a6 into position in a9. Note that this value can be safely incremented without any carry-outs. */ ssai 16 src a9, a9, a6 /* Compute the low word into a6. */ do_mul(a11, a2, l, a3, l) /* pp 0 */ sll a6, a6 add a6, a6, a11 bgeu a6, a11, 1f addi a9, a9, 1 1: /* Compute the high word into wh. */ do_mul(wh, a2, h, a3, h) /* pp 3 */ add wh, wh, a9 mov wl, a6 #endif /* !MUL32_HIGH */ #if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL /* Restore the original return address. 
*/ l32i a0, sp, 0 #endif #ifdef __XTENSA_CALL0_ABI__ l32i a12, sp, 16 l32i a13, sp, 20 l32i a14, sp, 24 l32i a15, sp, 28 abi_ret(32) #else abi_ret_default #endif #if XCHAL_NO_MUL .macro do_addx2 dst, as, at, tmp #if XCHAL_HAVE_ADDX addx2 \dst, \as, \at #else slli \tmp, \as, 1 add \dst, \tmp, \at #endif .endm .macro do_addx4 dst, as, at, tmp #if XCHAL_HAVE_ADDX addx4 \dst, \as, \at #else slli \tmp, \as, 2 add \dst, \tmp, \at #endif .endm .macro do_addx8 dst, as, at, tmp #if XCHAL_HAVE_ADDX addx8 \dst, \as, \at #else slli \tmp, \as, 3 add \dst, \tmp, \at #endif .endm /* For Xtensa processors with no multiply hardware, this simplified version of _mulsi3 is used for multiplying 16-bit chunks of the floating-point mantissas. When using CALL0, this function uses a custom ABI: the inputs are passed in a13 and a14, the result is returned in a12, and a8 and a15 are clobbered. */ .align 4 .Lmul_mulsi3: abi_entry_default .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2 movi \dst, 0 1: add \tmp1, \src2, \dst extui \tmp2, \src1, 0, 1 movnez \dst, \tmp1, \tmp2 do_addx2 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 1, 1 movnez \dst, \tmp1, \tmp2 do_addx4 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 2, 1 movnez \dst, \tmp1, \tmp2 do_addx8 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 3, 1 movnez \dst, \tmp1, \tmp2 srli \src1, \src1, 4 slli \src2, \src2, 4 bnez \src1, 1b .endm #ifdef __XTENSA_CALL0_ABI__ mul_mulsi3_body a12, a13, a14, a15, a8 #else /* The result will be written into a2, so save that argument in a4. */ mov a4, a2 mul_mulsi3_body a2, a4, a3, a5, a6 #endif abi_ret_default #endif /* XCHAL_NO_MUL */ ENDPROC(__umulsidi3)
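Without a 32x32 high-half multiply, the 64-bit product is assembled from four 16x16 partial products with explicit carry tracking; that is the pp0..pp3 schedule above. The same combination in C: the pp1+pp2 carry lands 16 bits up in the high word, and the low-word carry lands at bit 32:

#include <stdint.h>

/* Four 16x16->32 partial products combined as in the asm above. */
static uint64_t umulsidi3_model(uint32_t x, uint32_t y)
{
        uint32_t xl = x & 0xffff, xh = x >> 16;
        uint32_t yl = y & 0xffff, yh = y >> 16;

        uint32_t pp0 = xl * yl, pp1 = xl * yh;
        uint32_t pp2 = xh * yl, pp3 = xh * yh;

        uint32_t mid = pp1 + pp2;
        uint32_t mid_carry = mid < pp1;         /* weight 2^48 */

        uint32_t lo = pp0 + (mid << 16);
        uint32_t lo_carry = lo < pp0;           /* weight 2^32 */

        uint32_t hi = pp3 + (mid >> 16) + (mid_carry << 16) + lo_carry;

        return ((uint64_t)hi << 32) | lo;
}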
aixcc-public/challenge-001-exemplar-source
2,572
arch/xtensa/lib/modsi3.S
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> ENTRY(__modsi3) abi_entry_default #if XCHAL_HAVE_DIV32 rems a2, a2, a3 #else mov a7, a2 /* save original (signed) dividend */ do_abs a2, a2, a4 /* udividend = abs (dividend) */ do_abs a3, a3, a4 /* udivisor = abs (divisor) */ bltui a3, 2, .Lle_one /* check if udivisor <= 1 */ do_nsau a5, a2, a6, a8 /* udividend_shift = nsau (udividend) */ do_nsau a4, a3, a6, a8 /* udivisor_shift = nsau (udivisor) */ bgeu a5, a4, .Lspecial sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */ ssl a4 sll a3, a3 /* udivisor <<= count */ /* test-subtract-and-shift loop */ #if XCHAL_HAVE_LOOPS loopnez a4, .Lloopend #endif /* XCHAL_HAVE_LOOPS */ .Lloop: bltu a2, a3, .Lzerobit sub a2, a2, a3 .Lzerobit: srli a3, a3, 1 #if !XCHAL_HAVE_LOOPS addi a4, a4, -1 bnez a4, .Lloop #endif /* !XCHAL_HAVE_LOOPS */ .Lloopend: .Lspecial: bltu a2, a3, .Lreturn sub a2, a2, a3 /* subtract again if udividend >= udivisor */ .Lreturn: bgez a7, .Lpositive neg a2, a2 /* if (dividend < 0), return -udividend */ .Lpositive: abi_ret_default .Lle_one: bnez a3, .Lreturn0 /* Divide by zero: Use an illegal instruction to force an exception. The subsequent "DIV0" string can be recognized by the exception handler to identify the real cause of the exception. */ ill .ascii "DIV0" .Lreturn0: movi a2, 0 #endif /* XCHAL_HAVE_DIV32 */ abi_ret_default ENDPROC(__modsi3) #if !XCHAL_HAVE_NSA .section .rodata .align 4 .global __nsau_data .type __nsau_data, @object __nsau_data: .byte 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4 .byte 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 .byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 .byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .size __nsau_data, . - __nsau_data #endif /* !XCHAL_HAVE_NSA */
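__nsau_data backs the do_nsau macro on cores without the NSA instruction: normalize the value in 16- and 8-bit steps, then look up the leading-zero count of the top byte. A rough C model of that computation (macro internals paraphrased, not quoted):

#include <stdint.h>

extern const uint8_t __nsau_data[256];  /* the table defined above */

/* nsau(x) = number of leading zero bits; nsau(0) is 32 on hardware. */
static unsigned nsau_model(uint32_t x)
{
        unsigned n = 0;

        if (x == 0)
                return 32;
        if (!(x & 0xffff0000)) { n += 16; x <<= 16; }
        if (!(x & 0xff000000)) { n += 8;  x <<= 8;  }
        return n + __nsau_data[x >> 24];
}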
aixcc-public/challenge-001-exemplar-source
2,332
arch/xtensa/lib/mulsi3.S
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/core.h> .macro do_addx2 dst, as, at, tmp #if XCHAL_HAVE_ADDX addx2 \dst, \as, \at #else slli \tmp, \as, 1 add \dst, \tmp, \at #endif .endm .macro do_addx4 dst, as, at, tmp #if XCHAL_HAVE_ADDX addx4 \dst, \as, \at #else slli \tmp, \as, 2 add \dst, \tmp, \at #endif .endm .macro do_addx8 dst, as, at, tmp #if XCHAL_HAVE_ADDX addx8 \dst, \as, \at #else slli \tmp, \as, 3 add \dst, \tmp, \at #endif .endm ENTRY(__mulsi3) abi_entry_default #if XCHAL_HAVE_MUL32 mull a2, a2, a3 #elif XCHAL_HAVE_MUL16 or a4, a2, a3 srai a4, a4, 16 bnez a4, .LMUL16 mul16u a2, a2, a3 abi_ret_default .LMUL16: srai a4, a2, 16 srai a5, a3, 16 mul16u a7, a4, a3 mul16u a6, a5, a2 mul16u a4, a2, a3 add a7, a7, a6 slli a7, a7, 16 add a2, a7, a4 #elif XCHAL_HAVE_MAC16 mul.aa.hl a2, a3 mula.aa.lh a2, a3 rsr a5, ACCLO umul.aa.ll a2, a3 rsr a4, ACCLO slli a5, a5, 16 add a2, a4, a5 #else /* !MUL32 && !MUL16 && !MAC16 */ /* Multiply one bit at a time, but unroll the loop 4x to better exploit the addx instructions and avoid overhead. Peel the first iteration to save a cycle on init. */ /* Avoid negative numbers. */ xor a5, a2, a3 /* Top bit is 1 if one input is negative. */ do_abs a3, a3, a6 do_abs a2, a2, a6 /* Swap so the second argument is smaller. */ sub a7, a2, a3 mov a4, a3 movgez a4, a2, a7 /* a4 = max (a2, a3) */ movltz a3, a2, a7 /* a3 = min (a2, a3) */ movi a2, 0 extui a6, a3, 0, 1 movnez a2, a4, a6 do_addx2 a7, a4, a2, a7 extui a6, a3, 1, 1 movnez a2, a7, a6 do_addx4 a7, a4, a2, a7 extui a6, a3, 2, 1 movnez a2, a7, a6 do_addx8 a7, a4, a2, a7 extui a6, a3, 3, 1 movnez a2, a7, a6 bgeui a3, 16, .Lmult_main_loop neg a3, a2 movltz a2, a3, a5 abi_ret_default .align 4 .Lmult_main_loop: srli a3, a3, 4 slli a4, a4, 4 add a7, a4, a2 extui a6, a3, 0, 1 movnez a2, a7, a6 do_addx2 a7, a4, a2, a7 extui a6, a3, 1, 1 movnez a2, a7, a6 do_addx4 a7, a4, a2, a7 extui a6, a3, 2, 1 movnez a2, a7, a6 do_addx8 a7, a4, a2, a7 extui a6, a3, 3, 1 movnez a2, a7, a6 bgeui a3, 16, .Lmult_main_loop neg a3, a2 movltz a2, a3, a5 #endif /* !MUL32 && !MUL16 && !MAC16 */ abi_ret_default ENDPROC(__mulsi3)
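The software fallback retires four multiplier bits per pass, with addx2/addx4/addx8 folding in the x2/x4/x8 shifted addends without separate shift instructions. Its unsigned core in C; the routine itself also strips and restores signs via do_abs and the final movltz:

#include <stdint.h>

/* Four multiplier bits per iteration; bit k contributes (a << k),
 * which is what the addx2/addx4/addx8 accumulation above computes. */
static uint32_t mul_model(uint32_t a, uint32_t b)
{
        uint32_t acc = 0;

        while (b) {
                if (b & 1) acc += a;
                if (b & 2) acc += a << 1;       /* addx2 */
                if (b & 4) acc += a << 2;       /* addx4 */
                if (b & 8) acc += a << 3;       /* addx8 */
                b >>= 4;
                a <<= 4;
        }
        return acc;
}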
aixcc-public/challenge-001-exemplar-source
7,456
arch/xtensa/mm/misc.S
/*
 * arch/xtensa/mm/misc.S
 *
 * Miscellaneous assembly functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
#include <asm/tlbflush.h>

/*
 * clear_page and clear_user_page are the same for non-cache-aliased configs.
 *
 * clear_page (unsigned long page)
 *                    a2
 */

ENTRY(clear_page)

	abi_entry_default

	movi	a3, 0
	__loopi	a2, a7, PAGE_SIZE, 32
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32

	abi_ret_default

ENDPROC(clear_page)

/*
 * copy_page and copy_user_page are the same for non-cache-aliased configs.
 *
 * copy_page (void *to, void *from)
 *                a2        a3
 */

ENTRY(copy_page)

	abi_entry_default

	__loopi	a2, a4, PAGE_SIZE, 32

	l32i	a8, a3, 0
	l32i	a9, a3, 4
	s32i	a8, a2, 0
	s32i	a9, a2, 4

	l32i	a8, a3, 8
	l32i	a9, a3, 12
	s32i	a8, a2, 8
	s32i	a9, a2, 12

	l32i	a8, a3, 16
	l32i	a9, a3, 20
	s32i	a8, a2, 16
	s32i	a9, a2, 20

	l32i	a8, a3, 24
	l32i	a9, a3, 28
	s32i	a8, a2, 24
	s32i	a9, a2, 28

	addi	a2, a2, 32
	addi	a3, a3, 32

	__endl	a2, a4

	abi_ret_default

ENDPROC(copy_page)

#ifdef CONFIG_MMU
/*
 * If we have to deal with cache aliasing, we use temporary memory mappings
 * to ensure that the source and destination pages have the same color as
 * the virtual address. We use ways 0 and 1 for temporary mappings in such
 * cases.
 *
 * The temporary DTLB entries shouldn't be flushed by interrupts, but are
 * flushed by preemptive task switches. Special code in the
 * fast_second_level_miss handler re-establishes the temporary mapping.
 * It requires that the PPNs for the destination and source addresses are
 * in a6 and a7, respectively.
 */

/* TLB miss exceptions are treated specially in the following region */

ENTRY(__tlbtemp_mapping_start)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * clear_page_alias(void *addr, unsigned long paddr)
 *                      a2                a3
 */

ENTRY(clear_page_alias)

	abi_entry_default

	movi	a5, PAGE_OFFSET
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
	mov	a4, a2
	wdtlb	a6, a2
	dsync

	movi	a3, 0
	__loopi	a2, a7, PAGE_SIZE, 32
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32

	/* We need to invalidate the temporary dtlb entry. */

	idtlb	a4
	dsync

	abi_ret_default

ENDPROC(clear_page_alias)

/*
 * copy_page_alias(void *to, void *from,
 *                     a2        a3
 *                 unsigned long to_paddr, unsigned long from_paddr)
 *                               a4                      a5
 */

ENTRY(copy_page_alias)

	abi_entry_default

	/* Setup a temporary DTLB for destination. */

	addi	a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE)
	wdtlb	a6, a2
	dsync

	/* Setup a temporary DTLB for source. */

	addi	a7, a5, PAGE_KERNEL
	addi	a8, a3, 1		# way1
	wdtlb	a7, a8
	dsync

1:	__loopi	a2, a4, PAGE_SIZE, 32

	l32i	a8, a3, 0
	l32i	a9, a3, 4
	s32i	a8, a2, 0
	s32i	a9, a2, 4

	l32i	a8, a3, 8
	l32i	a9, a3, 12
	s32i	a8, a2, 8
	s32i	a9, a2, 12

	l32i	a8, a3, 16
	l32i	a9, a3, 20
	s32i	a8, a2, 16
	s32i	a9, a2, 20

	l32i	a8, a3, 24
	l32i	a9, a3, 28
	s32i	a8, a2, 24
	s32i	a9, a2, 28

	addi	a2, a2, 32
	addi	a3, a3, 32

	__endl	a2, a4

	/* We need to invalidate any temporary mapping! */

	addi	a2, a2, -PAGE_SIZE
	idtlb	a2
	dsync

	addi	a3, a3, -PAGE_SIZE+1
	idtlb	a3
	dsync

	abi_ret_default

ENDPROC(copy_page_alias)

#endif

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * void __flush_invalidate_dcache_page_alias (addr, phys)
 *                                             a2    a3
 */

ENTRY(__flush_invalidate_dcache_page_alias)

	abi_entry_default

	movi	a7, 0			# required for exception handler
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
	mov	a4, a2
	wdtlb	a6, a2
	dsync

	___flush_invalidate_dcache_page a2 a3

	idtlb	a4
	dsync

	abi_ret_default

ENDPROC(__flush_invalidate_dcache_page_alias)

/*
 * void __invalidate_dcache_page_alias (addr, phys)
 *                                       a2    a3
 */

ENTRY(__invalidate_dcache_page_alias)

	abi_entry_default

	movi	a7, 0			# required for exception handler
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
	mov	a4, a2
	wdtlb	a6, a2
	dsync

	___invalidate_dcache_page a2 a3

	idtlb	a4
	dsync

	abi_ret_default

ENDPROC(__invalidate_dcache_page_alias)
#endif

ENTRY(__tlbtemp_mapping_itlb)

#if (ICACHE_WAY_SIZE > PAGE_SIZE)

ENTRY(__invalidate_icache_page_alias)

	abi_entry_default

	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
	mov	a4, a2
	witlb	a6, a2
	isync

	___invalidate_icache_page a2 a3

	iitlb	a4
	isync
	abi_ret_default

ENDPROC(__invalidate_icache_page_alias)
#endif

/* End of special treatment in tlb miss exception */

ENTRY(__tlbtemp_mapping_end)

#endif /* CONFIG_MMU */

/*
 * void __invalidate_icache_page(ulong start)
 */

ENTRY(__invalidate_icache_page)

	abi_entry_default

	___invalidate_icache_page a2 a3
	isync

	abi_ret_default

ENDPROC(__invalidate_icache_page)

/*
 * void __invalidate_dcache_page(ulong start)
 */

ENTRY(__invalidate_dcache_page)

	abi_entry_default

	___invalidate_dcache_page a2 a3
	dsync

	abi_ret_default

ENDPROC(__invalidate_dcache_page)

/*
 * void __flush_invalidate_dcache_page(ulong start)
 */

ENTRY(__flush_invalidate_dcache_page)

	abi_entry_default

	___flush_invalidate_dcache_page a2 a3

	dsync
	abi_ret_default

ENDPROC(__flush_invalidate_dcache_page)

/*
 * void __flush_dcache_page(ulong start)
 */

ENTRY(__flush_dcache_page)

	abi_entry_default

	___flush_dcache_page a2 a3

	dsync
	abi_ret_default

ENDPROC(__flush_dcache_page)

/*
 * void __invalidate_icache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_icache_range)

	abi_entry_default

	___invalidate_icache_range a2 a3 a4
	isync

	abi_ret_default

ENDPROC(__invalidate_icache_range)

/*
 * void __flush_invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_invalidate_dcache_range)

	abi_entry_default

	___flush_invalidate_dcache_range a2 a3 a4
	dsync

	abi_ret_default

ENDPROC(__flush_invalidate_dcache_range)

/*
 * void __flush_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_dcache_range)

	abi_entry_default

	___flush_dcache_range a2 a3 a4
	dsync

	abi_ret_default

ENDPROC(__flush_dcache_range)

/*
 * void __invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_dcache_range)

	abi_entry_default

	___invalidate_dcache_range a2 a3 a4

	abi_ret_default

ENDPROC(__invalidate_dcache_range)

/*
 * void __invalidate_icache_all(void)
 */

ENTRY(__invalidate_icache_all)

	abi_entry_default

	___invalidate_icache_all a2 a3
	isync

	abi_ret_default

ENDPROC(__invalidate_icache_all)

/*
 * void __flush_invalidate_dcache_all(void)
 */

ENTRY(__flush_invalidate_dcache_all)

	abi_entry_default

	___flush_invalidate_dcache_all a2 a3
	dsync

	abi_ret_default

ENDPROC(__flush_invalidate_dcache_all)

/*
 * void __invalidate_dcache_all(void)
 */

ENTRY(__invalidate_dcache_all)

	abi_entry_default

	___invalidate_dcache_all a2 a3
	dsync

	abi_ret_default

ENDPROC(__invalidate_dcache_all)
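The alias helpers above exist because, when a cache way is larger than a page, two virtual mappings of the same physical page can land in different cache sets unless the low virtual-address bits agree. A small C illustration of that "page color" idea (not kernel code; the sizes are assumed values for the sketch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SK	4096u
#define DCACHE_WAY_SK	16384u	/* assumed 16 KiB way => 4 colors */

/* Color = which PAGE_SIZE-sized slot of a cache way an address maps to. */
static unsigned page_color(uintptr_t vaddr)
{
	return (vaddr & (DCACHE_WAY_SK - 1)) / PAGE_SIZE_SK;
}

int main(void)
{
	uintptr_t user = 0x2000;

	/* Same color: the temporary mapping sees the same cache lines. */
	printf("%u vs %u\n", page_color(user), page_color(0x6000));
	/* Different color: an alias, which the helpers must flush. */
	printf("%u vs %u\n", page_color(user), page_color(0x7000));
	return 0;
}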
aixcc-public/challenge-001-exemplar-source
5,049
arch/xtensa/boot/boot-redboot/bootstrap.S
/* SPDX-License-Identifier: GPL-2.0 */

#include <asm/core.h>
#include <asm/regs.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
#include <asm/processor.h>
	/*
	 * RB-Data: RedBoot data/bss
	 * P:       Boot-Parameters
	 * L:       Kernel-Loader
	 *
	 * The Linux-Kernel image including the loader must be loaded
	 * to a position so that the kernel and the boot parameters
	 * can fit in the space before the load address.
	 *  ______________________________________________________
	 * |_RB-Data_|_P_|__________|_L_|___Linux-Kernel___|______|
	 *                          ^
	 *                          ^ Load address
	 *  ______________________________________________________
	 * |___Linux-Kernel___|_P_|_L_|___________________________|
	 *
	 * The loader copies the parameters to the position that will
	 * be the end of the kernel and itself to the end of the
	 * parameter list.
	 */

/* Make sure we have enough space for the 'uncompressor' */

#define STACK_SIZE 32768
#define HEAP_SIZE (131072*4)

	# a2: Parameter list
	# a3: Size of parameter list

	.section .start, "ax"

	.globl __start
	/* this must be the first byte of the loader! */
__start:
	abi_entry(32)		# we do not intend to return
	_call0	_start
__start_a0:
	.align 4

	.section .text, "ax"
	.literal_position
	.begin literal_prefix .text

	/* put literals in here! */

	.globl _start
_start:

	/* 'reset' window registers */

	movi	a4, 1
	wsr	a4, ps
	rsync

#if XCHAL_HAVE_WINDOWED
	rsr	a5, windowbase
	ssl	a5
	sll	a4, a4
	wsr	a4, windowstart
	rsync
#endif

	movi	a4, KERNEL_PS_WOE_MASK
	wsr	a4, ps
	rsync

	KABI_C0	mov	abi_saved0, abi_arg0

	/* copy the loader to its address
	 * Note: The loader itself is a very small piece, so we assume we
	 *       don't partially overlap. We also assume (even more important)
	 *       that the kernel image is out of the way. Usually, when the
	 *       load address of this image is not at an arbitrary address,
	 *       but aligned to some 10K's, we shouldn't overlap.
	 */

	/* Note: The assembler cannot relax "addi a0, a0, ..." to an
	   l32r, so we load to a4 first. */

	# addi	a4, a0, __start - __start_a0
	# mov	a0, a4

	movi	a4, __start
	movi	a5, __start_a0
	add	a4, a0, a4
	sub	a0, a4, a5

	movi	a4, __start
	movi	a5, __reloc_end

	# a0: address where this code has been loaded
	# a4: compiled address of __start
	# a5: compiled end address

	mov.n	a7, a0
	mov.n	a8, a4

1:
	l32i	a10, a7, 0
	l32i	a11, a7, 4
	s32i	a10, a8, 0
	s32i	a11, a8, 4
	l32i	a10, a7, 8
	l32i	a11, a7, 12
	s32i	a10, a8, 8
	s32i	a11, a8, 12
	addi	a8, a8, 16
	addi	a7, a7, 16
	blt	a8, a5, 1b

	/* We have to flush and invalidate the caches here before we jump. */

#if XCHAL_DCACHE_IS_WRITEBACK

	___flush_dcache_all a5 a6

#endif

	___invalidate_icache_all a5 a6
	isync

	movi	a11, _reloc
	jx	a11

	.globl _reloc
_reloc:

	/* RedBoot is now at the end of the memory, so we don't have
	 * to copy the parameter list. Keep the code around in case
	 * we need it again. */
#if 0
	# a0: load address
	# a2: start address of parameter list
	# a3: length of parameter list
	# a4: __start

	/* copy the parameter list out of the way */

	movi	a6, _param_start
	add	a3, a2, a3
2:
	l32i	a8, a2, 0
	s32i	a8, a6, 0
	addi	a2, a2, 4
	addi	a6, a6, 4
	blt	a2, a3, 2b
#endif

	/* clear BSS section */
	movi	a6, __bss_start
	movi	a7, __bss_end
	movi.n	a5, 0
3:
	s32i	a5, a6, 0
	addi	a6, a6, 4
	blt	a6, a7, 3b

	movi	a5, -16
	movi	a1, _stack + STACK_SIZE
	and	a1, a1, a5

	/* Uncompress the kernel */

	# a0: load address
	# a2: boot parameter
	# a4: __start

	movi	a3, __image_load
	sub	a4, a3, a4
	add	abi_arg2, a0, a4

	# a1  Stack
	# a8(a4)  Load address of the image

	movi	abi_arg0, _image_start
	movi	abi_arg4, _image_end
	movi	abi_arg1, 0x1000000
	sub	abi_tmp0, abi_arg4, abi_arg0
	movi	abi_arg3, complen
	s32i	abi_tmp0, abi_arg3, 0

	movi	a0, 0

	# abi_arg0 destination
	# abi_arg1 maximum size of destination
	# abi_arg2 source
	# abi_arg3 ptr to length

	.extern gunzip
	movi	abi_tmp0, gunzip
	beqz	abi_tmp0, 1f

	abi_callx	abi_tmp0

	j	2f

	# abi_arg0 destination start
	# abi_arg1 maximum size of destination
	# abi_arg2 source start
	# abi_arg3 ptr to length
	# abi_arg4 destination end

1:
	l32i	abi_tmp0, abi_arg2, 0
	l32i	abi_tmp1, abi_arg2, 4
	s32i	abi_tmp0, abi_arg0, 0
	s32i	abi_tmp1, abi_arg0, 4
	l32i	abi_tmp0, abi_arg2, 8
	l32i	abi_tmp1, abi_arg2, 12
	s32i	abi_tmp0, abi_arg0, 8
	s32i	abi_tmp1, abi_arg0, 12
	addi	abi_arg0, abi_arg0, 16
	addi	abi_arg2, abi_arg2, 16
	blt	abi_arg0, abi_arg4, 1b

	/* jump to the kernel */
2:
#if XCHAL_DCACHE_IS_WRITEBACK

	___flush_dcache_all a5 a6

#endif

	___invalidate_icache_all a5 a6
	isync

	# a2  Boot parameter list

	KABI_C0	mov	abi_arg0, abi_saved0
	movi	a0, _image_start
	jx	a0

	.align 16
	.data
	.globl avail_ram
avail_ram:
	.long	_heap
	.globl end_avail
end_avail:
	.long	_heap + HEAP_SIZE

	.comm _stack, STACK_SIZE
	.comm _heap, HEAP_SIZE

	.globl end_avail
	.comm complen, 4

	.end	literal_prefix
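The self-relocation above copies the loader 16 bytes per iteration before jumping to the fresh copy. A hedged C sketch of that copy loop (names are illustrative; it assumes, as the asm comment does, that the regions do not partially overlap):

#include <assert.h>
#include <stdint.h>

/* Copy 16 bytes (four words) per iteration, as the asm loop at 1: does. */
static void relocate(uint32_t *dst, const uint32_t *src, uint32_t *dst_end)
{
	while (dst < dst_end) {		/* blt a8, a5, 1b */
		dst[0] = src[0];
		dst[1] = src[1];
		dst[2] = src[2];
		dst[3] = src[3];
		dst += 4;
		src += 4;
	}
	/* The real loader then flushes/invalidates caches and jumps. */
}

int main(void)
{
	uint32_t src[8] = {1, 2, 3, 4, 5, 6, 7, 8}, dst[8] = {0};

	relocate(dst, src, dst + 8);
	assert(dst[7] == 8);
	return 0;
}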
aixcc-public/challenge-001-exemplar-source
1,464
arch/xtensa/boot/boot-elf/bootstrap.S
/*
 * arch/xtensa/boot/boot-elf/bootstrap.S
 *
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2013 by Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com>
 * Piet Delaney <piet@tensilica.com>
 */

#include <asm/bootparam.h>
#include <asm/initialize_mmu.h>
#include <asm/vectors.h>
#include <linux/linkage.h>

	.section	.ResetVector.text, "ax"
	.global		_ResetVector
	.global		reset

_ResetVector:
	_j	_SetupMMU

	.begin	no-absolute-literals
	.literal_position

#ifdef CONFIG_PARSE_BOOTPARAM
	.align	4
_bootparam:
	.short	BP_TAG_FIRST
	.short	4
	.long	BP_VERSION
	.short	BP_TAG_LAST
	.short	0
	.long	0
#endif

	.align	4
_SetupMMU:
#if XCHAL_HAVE_WINDOWED
	movi	a0, 0
	wsr	a0, windowbase
	rsync
	movi	a0, 1
	wsr	a0, windowstart
	rsync
#endif
	movi	a0, 0x1F
	wsr	a0, ps
	rsync

#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
	initialize_mmu
#endif

	rsil	a0, XCHAL_DEBUGLEVEL-1
	rsync
reset:
#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
	XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
	movi	a0, CONFIG_KERNEL_LOAD_ADDRESS
#else
	movi	a0, KERNELOFFSET
#endif
#ifdef CONFIG_PARSE_BOOTPARAM
	movi	a2, _bootparam
#else
	movi	a2, 0
#endif
	movi	a3, 0
	movi	a4, 0
	jx	a0

	.end	no-absolute-literals
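The _bootparam block above hands the kernel a minimal tag list: a first tag carrying the boot-parameter version, then a terminating tag. A rough C view of that layout (the struct and the #define values here are assumptions for illustration; the real definitions live in asm/bootparam.h):

#include <stdint.h>

/* Illustrative view of one boot-parameter tag: id, payload size, payload. */
struct bp_tag_sk {
	uint16_t id;	/* BP_TAG_FIRST or BP_TAG_LAST */
	uint16_t size;	/* payload bytes that follow (4, then 0) */
	uint32_t data;	/* BP_VERSION for the first tag, 0 for the last */
};

/* Placeholder values, not taken from the kernel headers. */
#define BP_TAG_FIRST_SK	0x7B0B
#define BP_TAG_LAST_SK	0x7E0B
#define BP_VERSION_SK	0x0002

static const struct bp_tag_sk bootparam_sk[2] = {
	{ BP_TAG_FIRST_SK, 4, BP_VERSION_SK },	/* .short, .short, .long */
	{ BP_TAG_LAST_SK,  0, 0 },		/* terminator */
};

int main(void)
{
	/* 16 bytes, matching the six directives emitted by the asm. */
	return sizeof(bootparam_sk) != 16;
}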
aixcc-public/challenge-001-exemplar-source
20,185
arch/s390/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_LPP_OFFSET	= __LC_LPP

	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
	.endm

	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
	.endm

	.macro LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	.macro MBEAR reg
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant. The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm

	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	/*
	 * The CHKSTG macro jumps to the provided label in case the
	 * machine check interruption code reports one of unrecoverable
	 * storage errors:
	 * - Storage error uncorrected
	 * - Storage key error uncorrected
	 * - Storage degradation with Failing-storage-address validity
	 */
	.macro CHKSTG errlabel
	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
	jnz	\errlabel
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
	jz	.Loklabel\@
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
	jnz	\errlabel
.Loklabel\@:
	.endm

#if IS_ENABLED(CONFIG_KVM)
	/*
	 * The OUTSIDE macro jumps to the provided label in case the value
	 * in the provided register is outside of the provided range. The
	 * macro is useful for checking whether a PSW stored in a register
	 * pair points inside or outside of a block of instructions.
	 * @reg: register to check
	 * @start: start of the range
	 * @end: end of the range
	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 */
	.macro OUTSIDE reg,start,end,outside_label
	lgr	%r14,\reg
	larl	%r13,\start
	slgr	%r14,%r13
#ifdef CONFIG_AS_IS_LLVM
	clgfrl	%r14,.Lrange_size\@
#else
	clgfi	%r14,\end - \start
#endif
	jhe	\outside_label
#ifdef CONFIG_AS_IS_LLVM
	.section .rodata, "a"
	.align 4
.Lrange_size\@:
	.long	\end - \start
	.previous
#endif
	.endm

	.macro SIEEXIT
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only in order to avoid that __bpon starts at
	 * the beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address. E.g. objdump would take
	 * an arbitrary symbol name when disassembling this code.
	 * With the added nop in between, the __bpon symbol is unique
	 * again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

ENTRY(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r12,__LC_CURRENT
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(ret_from_fork)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r12,__LC_CURRENT
	lghi	%r10,0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	SIEEXIT
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stmg	%r8,%r9,__PT_PSW(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

/*
 * Load idle PSW.
 */
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	stckf	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
.globl psw_idle_exit
psw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	stckf	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)	# validate bear
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	6f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,6f
	OUTSIDE	%r9,.Lsie_entry,.Lsie_leave,4f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
	j	5f
4:	CHKSTG	.Lmcck_panic
5:	larl	%r14,.Lstosm_tmp
	stosm	0(%r14),0x04		# turn dat on, keep irqs off
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
	j	.Lmcck_stack
#endif
6:	CHKSTG	.Lmcck_panic
	larl	%r14,.Lstosm_tmp
	stosm	0(%r14),0x04		# turn dat on, keep irqs off
	tmhh	%r8,0x0001		# interrupting from user ?
	jz	.Lmcck_stack
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r2,%r11
	lgr	%r15,%r1
	brasl	%r14,s390_handle_mcck
.Lmcck_return:
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,.Lstop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,.Lthis_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
ENDPROC(mcck_int_handler)

ENTRY(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	la	%r15,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
0:	larl	%r15,.Lstosm_tmp
	stosm	0(%r15),0x04		# turn dat on, keep irqs off
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

	.section .data, "aw"
	.align	4
.Lstop_lock:	.long	0
.Lthis_cpu:	.short	0
.Lstosm_tmp:	.byte	0

	.section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT
#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif
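The TSTMSK macro above resolves, at assembly time, which byte of a multi-byte big-endian field a given mask falls in, so a single tm (test-under-mask) instruction can be emitted. A C sketch of that computation (illustrative; the real work happens in the assembler's recursive macro expansion):

#include <assert.h>
#include <stdio.h>

/* Find the byte offset and 8-bit mask for a tm instruction, mirroring
 * TSTMSK's recursion over a `size`-byte big-endian field. */
static void tstmsk_sketch(unsigned long mask, int size,
			  int *off, unsigned *byte)
{
	int bytepos = 0;

	while ((bytepos < size) && (mask >> 8)) {
		/* ".error Mask exceeds byte boundary" in the macro */
		assert((mask & 0xff) == 0);
		mask >>= 8;
		bytepos++;
	}
	assert(mask && mask <= 0xff);	/* "Mask must not be zero" */
	*off = size - bytepos - 1;	/* off = \size - \bytepos - 1 */
	*byte = (unsigned)mask;
}

int main(void)
{
	int off;
	unsigned byte;

	tstmsk_sketch(0x0400, 8, &off, &byte);
	printf("tm %d(addr),0x%02x\n", off, byte);	/* tm 6(addr),0x04 */
	return 0;
}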
aixcc-public/challenge-001-exemplar-source
2,059
arch/s390/kernel/relocate_kernel.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 2005
 *
 * Author(s): Rolf Adelsberger
 *
 */

#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/sigp.h>

/*
 * moves the new kernel to its destination...
 * %r2 = pointer to first kimage_entry_t
 * %r3 = start address - where to jump to after the job is done...
 * %r4 = subcode
 *
 * %r5 will be used as temp. storage
 * %r6 holds the destination address
 * %r7 = PAGE_SIZE
 * %r8 holds the source address
 * %r9 = PAGE_SIZE
 *
 * 0xf000 is a page_mask
 */

	.text
ENTRY(relocate_kernel)
	basr	%r13,0		# base address
.base:
	lghi	%r7,PAGE_SIZE	# load PAGE_SIZE in r7
	lghi	%r9,PAGE_SIZE	# load PAGE_SIZE in r9
	lg	%r5,0(%r2)	# read another word for indirection page
	aghi	%r2,8		# increment pointer
	tml	%r5,0x1		# is it a destination page?
	je	.indir_check	# NO, goto "indir_check"
	lgr	%r6,%r5		# r6 = r5
	nill	%r6,0xf000	# mask it out and...
	j	.base		# ...next iteration
.indir_check:
	tml	%r5,0x2		# is it an indirection page?
	je	.done_test	# NO, goto "done_test"
	nill	%r5,0xf000	# YES, mask out,
	lgr	%r2,%r5		# move it into the right register,
	j	.base		# and read next...
.done_test:
	tml	%r5,0x4		# is it the done indicator?
	je	.source_test	# NO! Well, then it should be the source indicator...
	j	.done		# ok, let's finish it here...
.source_test:
	tml	%r5,0x8		# it should be a source indicator...
	je	.base		# NO, ignore it...
	lgr	%r8,%r5		# r8 = r5
	nill	%r8,0xf000	# masking
0:	mvcle	%r6,%r8,0x0	# copy PAGE_SIZE bytes from r8 to r6 - pad with 0
	jo	0b
	j	.base
.done:
	lgr	%r0,%r4		# subcode
	cghi	%r3,0
	je	.diag
	la	%r4,load_psw-.base(%r13)	# load psw-address into the register
	o	%r3,4(%r4)	# or load address into psw
	st	%r3,4(%r4)
	mvc	0(8,%r0),0(%r4)	# copy psw to absolute address 0
.diag:
	diag	%r0,%r0,0x308
ENDPROC(relocate_kernel)

	.align	8
load_psw:
	.long	0x00080000,0x80000000
relocate_kernel_end:
	.align	8
	.globl	relocate_kernel_len
relocate_kernel_len:
	.quad	relocate_kernel_end - relocate_kernel
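The loop above walks a kexec kimage entry list, where the low bits of each entry encode its role (0x1 destination, 0x2 indirection, 0x4 done, 0x8 source). A hedged C sketch of the same walk (illustrative only; plain heap buffers stand in for page frames, and the 0xfff mask plays the role of the nill 0xf000 page masking):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define IND_DESTINATION	0x1
#define IND_INDIRECTION	0x2
#define IND_DONE	0x4
#define IND_SOURCE	0x8
#define LOW_MASK	0xfffUL		/* page-align by clearing low bits */

static void walk(const uintptr_t *entry, size_t page_size)
{
	void *dest = 0;

	for (;;) {
		uintptr_t e = *entry++;

		if (e & IND_DESTINATION) {
			dest = (void *)(e & ~LOW_MASK);
		} else if (e & IND_INDIRECTION) {
			entry = (const uintptr_t *)(e & ~LOW_MASK);
		} else if (e & IND_DONE) {
			break;
		} else if (e & IND_SOURCE) {
			/* mvcle in the asm; dest auto-advances a page */
			memcpy(dest, (const void *)(e & ~LOW_MASK), page_size);
			dest = (char *)dest + page_size;
		}
	}
}

int main(void)
{
	size_t psz = 4096;
	unsigned char *src = aligned_alloc(psz, psz);
	unsigned char *dst = aligned_alloc(psz, psz);
	uintptr_t list[3];

	memset(src, 0xAB, psz);
	list[0] = (uintptr_t)dst | IND_DESTINATION;
	list[1] = (uintptr_t)src | IND_SOURCE;
	list[2] = IND_DONE;
	walk(list, psz);
	return dst[0] != 0xAB;
}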
aixcc-public/challenge-001-exemplar-source
4,222
arch/s390/kernel/mcount.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 2008, 2009
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/nospec-insn.h>
#include <asm/ptrace.h>
#include <asm/export.h>

#define STACK_FRAME_SIZE	(STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS		(STACK_FRAME_OVERHEAD)
#define STACK_PTREGS_GPRS	(STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW	(STACK_PTREGS + __PT_PSW)
#define STACK_PTREGS_ORIG_GPR2	(STACK_PTREGS + __PT_ORIG_GPR2)
#define STACK_PTREGS_FLAGS	(STACK_PTREGS + __PT_FLAGS)
/* packed stack: allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE	24

#ifdef CONFIG_FUNCTION_TRACER

	GEN_BR_THUNK %r1
	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"

ENTRY(ftrace_stub)
	BR_EX	%r14
ENDPROC(ftrace_stub)

	.macro	ftrace_regs_entry, allregs=0
	stg	%r14,(__SF_GPRS+8*8)(%r15)	# save traced function caller

	.if \allregs == 1
	# save psw mask
	# don't put any instructions clobbering CC before this point
	epsw	%r1,%r14
	risbg	%r14,%r1,0,31,32
	.endif

	lgr	%r1,%r15
	# allocate stack frame for ftrace_caller to contain traced function
	aghi	%r15,-TRACED_FUNC_FRAME_SIZE
	stg	%r1,__SF_BACKCHAIN(%r15)
	stg	%r0,(__SF_GPRS+8*8)(%r15)
	stg	%r15,(__SF_GPRS+9*8)(%r15)
	# allocate pt_regs and stack frame for ftrace_trace_function
	aghi	%r15,-STACK_FRAME_SIZE
	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)
	xc	STACK_PTREGS_ORIG_GPR2(8,%r15),STACK_PTREGS_ORIG_GPR2(%r15)

	.if \allregs == 1
	stg	%r14,(STACK_PTREGS_PSW)(%r15)
	mvghi	STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
	.else
	xc	STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
	.endif

	lg	%r14,(__SF_GPRS+8*8)(%r1)	# restore original return address
	aghi	%r1,-TRACED_FUNC_FRAME_SIZE
	stg	%r1,__SF_BACKCHAIN(%r15)
	stg	%r0,(STACK_PTREGS_PSW+8)(%r15)
	stmg	%r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
	.endm

SYM_CODE_START(ftrace_regs_caller)
	ftrace_regs_entry	1
	j	ftrace_common
SYM_CODE_END(ftrace_regs_caller)

SYM_CODE_START(ftrace_caller)
	ftrace_regs_entry	0
	j	ftrace_common
SYM_CODE_END(ftrace_caller)

SYM_CODE_START(ftrace_common)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	aghik	%r2,%r0,-MCOUNT_INSN_SIZE
	lgrl	%r4,function_trace_op
	lgrl	%r1,ftrace_func
#else
	lgr	%r2,%r0
	aghi	%r2,-MCOUNT_INSN_SIZE
	larl	%r4,function_trace_op
	lg	%r4,0(%r4)
	larl	%r1,ftrace_func
	lg	%r1,0(%r1)
#endif
	lgr	%r3,%r14
	la	%r5,STACK_PTREGS(%r15)
	BASR_EX	%r14,%r1
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# The j instruction gets runtime patched to a nop instruction.
# See ftrace_enable_ftrace_graph_caller.
SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
	j	.Lftrace_graph_caller_end
	lmg	%r2,%r3,(STACK_PTREGS_GPRS+14*8)(%r15)
	lg	%r4,(STACK_PTREGS_PSW+8)(%r15)
	brasl	%r14,prepare_ftrace_return
	stg	%r2,(STACK_PTREGS_GPRS+14*8)(%r15)
.Lftrace_graph_caller_end:
#endif
	lg	%r0,(STACK_PTREGS_PSW+8)(%r15)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	ltg	%r1,STACK_PTREGS_ORIG_GPR2(%r15)
	locgrz	%r1,%r0
#else
	lg	%r1,STACK_PTREGS_ORIG_GPR2(%r15)
	ltgr	%r1,%r1
	jnz	0f
	lgr	%r1,%r0
#endif
0:	lmg	%r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
	BR_EX	%r1
SYM_CODE_END(ftrace_common)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

SYM_FUNC_START(return_to_handler)
	stmg	%r2,%r5,32(%r15)
	lgr	%r1,%r15
	aghi	%r15,-STACK_FRAME_OVERHEAD
	stg	%r1,__SF_BACKCHAIN(%r15)
	brasl	%r14,ftrace_return_to_handler
	aghi	%r15,STACK_FRAME_OVERHEAD
	lgr	%r14,%r2
	lmg	%r2,%r5,32(%r15)
	BR_EX	%r14
SYM_FUNC_END(return_to_handler)

#endif
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_KPROBES

SYM_FUNC_START(__kretprobe_trampoline)

	stg	%r14,(__SF_GPRS+8*8)(%r15)
	lay	%r15,-STACK_FRAME_SIZE(%r15)
	stmg	%r0,%r14,STACK_PTREGS_GPRS(%r15)

	# store original stack pointer in backchain and pt_regs
	lay	%r7,STACK_FRAME_SIZE(%r15)
	stg	%r7,__SF_BACKCHAIN(%r15)
	stg	%r7,STACK_PTREGS_GPRS+(15*8)(%r15)

	# store full psw
	epsw	%r2,%r3
	risbg	%r3,%r2,0,31,32
	stg	%r3,STACK_PTREGS_PSW(%r15)
	larl	%r1,__kretprobe_trampoline
	stg	%r1,STACK_PTREGS_PSW+8(%r15)

	lay	%r2,STACK_PTREGS(%r15)
	brasl	%r14,trampoline_probe_handler

	mvc	__SF_EMPTY(16,%r7),STACK_PTREGS_PSW(%r15)
	lmg	%r0,%r15,STACK_PTREGS_GPRS(%r15)
	lpswe	__SF_EMPTY(%r15)
SYM_FUNC_END(__kretprobe_trampoline)

#endif /* CONFIG_KPROBES */
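The graph-tracer path above works by swapping a function's saved return address for the return_to_handler trampoline and remembering the real address so the exit can be traced. A conceptual C sketch of that idea (an illustration only, not the kernel's prepare_ftrace_return/ftrace_return_to_handler implementation; names and the fixed-depth shadow stack are assumptions):

#include <stdio.h>

#define SHADOW_DEPTH 64

static void *shadow_stack[SHADOW_DEPTH];
static int shadow_top;

/* On function entry: stash the real return address, hand back the
 * trampoline address to install in its place. */
static void *prepare_return_sk(void *real_ret, void *trampoline)
{
	if (shadow_top == SHADOW_DEPTH)
		return real_ret;	/* overflow: leave this call untraced */
	shadow_stack[shadow_top++] = real_ret;
	return trampoline;
}

/* On function exit (called by the trampoline): trace the return and
 * yield the real address to resume at. */
static void *return_to_handler_sk(void)
{
	void *ret = shadow_stack[--shadow_top];

	printf("function returned, resuming at %p\n", ret);
	return ret;
}

int main(void)
{
	void *tramp = (void *)0x1000, *real = (void *)0x2000;
	void *installed = prepare_return_sk(real, tramp);

	return return_to_handler_sk() != real || installed != tramp;
}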