Dataset schema: repo_id (string, length 5-115) | size (int64, 590-5.01M) | file_path (string, length 4-212) | content (string, length 590-5.01M)

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      3,635
file_path: arch/mips/kernel/relocate_kernel.S
content:
/* SPDX-License-Identifier: GPL-2.0-only */ /* * relocate_kernel.S for kexec * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006 */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/addrspace.h> #include <kernel-entry-init.h> LEAF(relocate_new_kernel) PTR_L a0, arg0 PTR_L a1, arg1 PTR_L a2, arg2 PTR_L a3, arg3 PTR_L s0, kexec_indirection_page PTR_L s1, kexec_start_address process_entry: PTR_L s2, (s0) PTR_ADDIU s0, s0, SZREG /* * In case of a kdump/crash kernel, the indirection page is not * populated as the kernel is directly copied to a reserved location */ beqz s2, done /* destination page */ and s3, s2, 0x1 beq s3, zero, 1f and s4, s2, ~0x1 /* store destination addr in s4 */ b process_entry 1: /* indirection page, update s0 */ and s3, s2, 0x2 beq s3, zero, 1f and s0, s2, ~0x2 b process_entry 1: /* done page */ and s3, s2, 0x4 beq s3, zero, 1f b done 1: /* source page */ and s3, s2, 0x8 beq s3, zero, process_entry and s2, s2, ~0x8 li s6, (1 << _PAGE_SHIFT) / SZREG copy_word: /* copy page word by word */ REG_L s5, (s2) REG_S s5, (s4) PTR_ADDIU s4, s4, SZREG PTR_ADDIU s2, s2, SZREG LONG_ADDIU s6, s6, -1 beq s6, zero, process_entry b copy_word b process_entry done: #ifdef CONFIG_SMP /* kexec_flag reset is signal to other CPUs what kernel was moved to it's location. Note - we need relocated address of kexec_flag. */ bal 1f 1: move t1,ra; PTR_LA t2,1b PTR_LA t0,kexec_flag PTR_SUB t0,t0,t2; PTR_ADD t0,t1,t0; LONG_S zero,(t0) #endif #ifdef CONFIG_CPU_CAVIUM_OCTEON /* We need to flush I-cache before jumping to new kernel. * Unfortunately, this code is cpu-specific. */ .set push .set noreorder syncw syncw synci 0($0) .set pop #else sync #endif /* jump to kexec_start_address */ j s1 END(relocate_new_kernel) #ifdef CONFIG_SMP /* * Other CPUs should wait until code is relocated and * then start at entry (?) point. */ LEAF(kexec_smp_wait) PTR_L a0, s_arg0 PTR_L a1, s_arg1 PTR_L a2, s_arg2 PTR_L a3, s_arg3 PTR_L s1, kexec_start_address /* Non-relocated address works for args and kexec_start_address ( old * kernel is not overwritten). But we need relocated address of * kexec_flag. */ bal 1f 1: move t1,ra; PTR_LA t2,1b PTR_LA t0,kexec_flag PTR_SUB t0,t0,t2; PTR_ADD t0,t1,t0; 1: LONG_L s0, (t0) bne s0, zero,1b #ifdef USE_KEXEC_SMP_WAIT_FINAL kexec_smp_wait_final #else sync #endif j s1 END(kexec_smp_wait) #endif #ifdef __mips64 /* all PTR's must be aligned to 8 byte in 64-bit mode */ .align 3 #endif /* All parameters to new kernel are passed in registers a0-a3. * kexec_args[0..3] are used to prepare register values. */ EXPORT(kexec_args) arg0: PTR_WD 0x0 arg1: PTR_WD 0x0 arg2: PTR_WD 0x0 arg3: PTR_WD 0x0 .size kexec_args,PTRSIZE*4 #ifdef CONFIG_SMP /* * Secondary CPUs may have different kernel parameters in * their registers a0-a3. secondary_kexec_args[0..3] are used * to prepare register values. */ EXPORT(secondary_kexec_args) s_arg0: PTR_WD 0x0 s_arg1: PTR_WD 0x0 s_arg2: PTR_WD 0x0 s_arg3: PTR_WD 0x0 .size secondary_kexec_args,PTRSIZE*4 kexec_flag: LONG 0x1 #endif EXPORT(kexec_start_address) PTR_WD 0x0 .size kexec_start_address, PTRSIZE EXPORT(kexec_indirection_page) PTR_WD 0 .size kexec_indirection_page, PTRSIZE relocate_new_kernel_end: EXPORT(relocate_new_kernel_size) PTR_WD relocate_new_kernel_end - relocate_new_kernel .size relocate_new_kernel_size, PTRSIZE
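The process_entry loop above walks the kexec indirection list: each word is a page address tagged in its low bits (0x1 destination page, 0x2 next indirection page, 0x4 done, 0x8 source page to copy). A minimal C sketch of the same walk; the IND_* names follow the kexec convention, and the 4 KiB page size is an assumption for illustration:

```c
#include <string.h>

#define IND_DESTINATION 0x1   /* entry holds the next destination page */
#define IND_INDIRECTION 0x2   /* entry points at another indirection page */
#define IND_DONE        0x4   /* end of list */
#define IND_SOURCE      0x8   /* entry is a source page: copy it */

#define PAGE_SIZE 4096UL      /* assumed page size for illustration */

static void process_entries(unsigned long *entry)
{
	unsigned char *dest = NULL;

	for (;;) {
		unsigned long e = *entry++;

		/* kdump path: list is empty, kernel was copied directly */
		if (e == 0 || (e & IND_DONE))
			return;
		if (e & IND_DESTINATION)
			dest = (unsigned char *)(e & ~1UL);
		else if (e & IND_INDIRECTION)
			entry = (unsigned long *)(e & ~2UL);
		else if (e & IND_SOURCE) {
			memcpy(dest, (void *)(e & ~8UL), PAGE_SIZE);
			dest += PAGE_SIZE;  /* the asm copies SZREG words at a time */
		}
	}
}
```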

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      13,311
file_path: arch/mips/kernel/bmips_5xxx_init.S
content:
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2011-2012 by Broadcom Corporation * * Init for bmips 5000. * Used to init second core in dual core 5000's. */ #include <linux/init.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/addrspace.h> #include <asm/hazards.h> #include <asm/bmips.h> #ifdef CONFIG_CPU_BMIPS5000 #define cacheop(kva, size, linesize, op) \ .set noreorder ; \ addu t1, kva, size ; \ subu t2, linesize, 1 ; \ not t2 ; \ and t0, kva, t2 ; \ addiu t1, t1, -1 ; \ and t1, t2 ; \ 9: cache op, 0(t0) ; \ bne t0, t1, 9b ; \ addu t0, linesize ; \ .set reorder ; #define IS_SHIFT 22 #define IL_SHIFT 19 #define IA_SHIFT 16 #define DS_SHIFT 13 #define DL_SHIFT 10 #define DA_SHIFT 7 #define IS_MASK 7 #define IL_MASK 7 #define IA_MASK 7 #define DS_MASK 7 #define DL_MASK 7 #define DA_MASK 7 #define ICE_MASK 0x80000000 #define DCE_MASK 0x40000000 #define CP0_BRCM_CONFIG0 $22, 0 #define CP0_BRCM_MODE $22, 1 #define CP0_CONFIG_K0_MASK 7 #define CP0_ICACHE_TAG_LO $28 #define CP0_ICACHE_DATA_LO $28, 1 #define CP0_DCACHE_TAG_LO $28, 2 #define CP0_D_SEC_CACHE_DATA_LO $28, 3 #define CP0_ICACHE_TAG_HI $29 #define CP0_ICACHE_DATA_HI $29, 1 #define CP0_DCACHE_TAG_HI $29, 2 #define CP0_BRCM_MODE_Luc_MASK (1 << 11) #define CP0_BRCM_CONFIG0_CWF_MASK (1 << 20) #define CP0_BRCM_CONFIG0_TSE_MASK (1 << 19) #define CP0_BRCM_MODE_SET_MASK (1 << 7) #define CP0_BRCM_MODE_ClkRATIO_MASK (7 << 4) #define CP0_BRCM_MODE_BrPRED_MASK (3 << 24) #define CP0_BRCM_MODE_BrPRED_SHIFT 24 #define CP0_BRCM_MODE_BrHIST_MASK (0x1f << 20) #define CP0_BRCM_MODE_BrHIST_SHIFT 20 /* ZSC L2 Cache Register Access Register Definitions */ #define BRCM_ZSC_ALL_REGS_SELECT 0x7 << 24 #define BRCM_ZSC_CONFIG_REG 0 << 3 #define BRCM_ZSC_REQ_BUFFER_REG 2 << 3 #define BRCM_ZSC_RBUS_ADDR_MAPPING_REG0 4 << 3 #define BRCM_ZSC_RBUS_ADDR_MAPPING_REG1 6 << 3 #define BRCM_ZSC_RBUS_ADDR_MAPPING_REG2 8 << 3 #define BRCM_ZSC_SCB0_ADDR_MAPPING_REG0 0xa << 3 #define BRCM_ZSC_SCB0_ADDR_MAPPING_REG1 0xc << 3 #define BRCM_ZSC_SCB1_ADDR_MAPPING_REG0 0xe << 3 #define BRCM_ZSC_SCB1_ADDR_MAPPING_REG1 0x10 << 3 #define BRCM_ZSC_CONFIG_LMB1En 1 << (15) #define BRCM_ZSC_CONFIG_LMB0En 1 << (14) /* branch predition values */ #define BRCM_BrPRED_ALL_TAKEN (0x0) #define BRCM_BrPRED_ALL_NOT_TAKEN (0x1) #define BRCM_BrPRED_BHT_ENABLE (0x2) #define BRCM_BrPRED_PREDICT_BACKWARD (0x3) .align 2 /* * Function: size_i_cache * Arguments: None * Returns: v0 = i cache size, v1 = I cache line size * Description: compute the I-cache size and I-cache line size * Trashes: v0, v1, a0, t0 * * pseudo code: * */ LEAF(size_i_cache) .set noreorder mfc0 a0, CP0_CONFIG, 1 move t0, a0 /* * Determine sets per way: IS * * This field contains the number of sets (i.e., indices) per way of * the instruction cache: * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k * vi) 0x5 - 0x7: Reserved. */ srl a0, a0, IS_SHIFT and a0, a0, IS_MASK /* sets per way = (64<<IS) */ li v0, 0x40 sllv v0, v0, a0 /* * Determine line size * * This field contains the line size of the instruction cache: * i) 0x0: No I-cache present, i) 0x3: 16 bytes, ii) 0x4: 32 bytes, iii) * 0x5: 64 bytes, iv) the rest: Reserved. 
*/ move a0, t0 srl a0, a0, IL_SHIFT and a0, a0, IL_MASK beqz a0, no_i_cache nop /* line size = 2 ^ (IL+1) */ addi a0, a0, 1 li v1, 1 sll v1, v1, a0 /* v0 now have sets per way, multiply it by line size now * that will give the set size */ sll v0, v0, a0 /* * Determine set associativity * * This field contains the set associativity of the instruction cache. * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3: * 4-way, v) 0x4 - 0x7: Reserved. */ move a0, t0 srl a0, a0, IA_SHIFT and a0, a0, IA_MASK addi a0, a0, 0x1 /* v0 has the set size, multiply it by * set associativiy, to get the cache size */ multu v0, a0 /*multu is interlocked, so no need to insert nops */ mflo v0 b 1f nop no_i_cache: move v0, zero move v1, zero 1: jr ra nop .set reorder END(size_i_cache) /* * Function: size_d_cache * Arguments: None * Returns: v0 = d cache size, v1 = d cache line size * Description: compute the D-cache size and D-cache line size. * Trashes: v0, v1, a0, t0 * */ LEAF(size_d_cache) .set noreorder mfc0 a0, CP0_CONFIG, 1 move t0, a0 /* * Determine sets per way: IS * * This field contains the number of sets (i.e., indices) per way of * the instruction cache: * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k * vi) 0x5 - 0x7: Reserved. */ srl a0, a0, DS_SHIFT and a0, a0, DS_MASK /* sets per way = (64<<IS) */ li v0, 0x40 sllv v0, v0, a0 /* * Determine line size * * This field contains the line size of the instruction cache: * i) 0x0: No I-cache present, i) 0x3: 16 bytes, ii) 0x4: 32 bytes, iii) * 0x5: 64 bytes, iv) the rest: Reserved. */ move a0, t0 srl a0, a0, DL_SHIFT and a0, a0, DL_MASK beqz a0, no_d_cache nop /* line size = 2 ^ (IL+1) */ addi a0, a0, 1 li v1, 1 sll v1, v1, a0 /* v0 now have sets per way, multiply it by line size now * that will give the set size */ sll v0, v0, a0 /* determine set associativity * * This field contains the set associativity of the instruction cache. * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3: * 4-way, v) 0x4 - 0x7: Reserved. */ move a0, t0 srl a0, a0, DA_SHIFT and a0, a0, DA_MASK addi a0, a0, 0x1 /* v0 has the set size, multiply it by * set associativiy, to get the cache size */ multu v0, a0 /*multu is interlocked, so no need to insert nops */ mflo v0 b 1f nop no_d_cache: move v0, zero move v1, zero 1: jr ra nop .set reorder END(size_d_cache) /* * Function: enable_ID * Arguments: None * Returns: None * Description: Enable I and D caches, initialize I and D-caches, also set * hardware delay for d-cache (TP0). * Trashes: t0 * */ .global enable_ID .ent enable_ID .set noreorder enable_ID: mfc0 t0, CP0_BRCM_CONFIG0 or t0, t0, (ICE_MASK | DCE_MASK) mtc0 t0, CP0_BRCM_CONFIG0 jr ra nop .end enable_ID .set reorder /* * Function: l1_init * Arguments: None * Returns: None * Description: Enable I and D caches, and initialize I and D-caches * Trashes: a0, v0, v1, t0, t1, t2, t8 * */ .globl l1_init .ent l1_init .set noreorder l1_init: /* save return address */ move t8, ra /* initialize I and D cache Data and Tag registers. */ mtc0 zero, CP0_ICACHE_TAG_LO mtc0 zero, CP0_ICACHE_TAG_HI mtc0 zero, CP0_ICACHE_DATA_LO mtc0 zero, CP0_ICACHE_DATA_HI mtc0 zero, CP0_DCACHE_TAG_LO mtc0 zero, CP0_DCACHE_TAG_HI /* Enable Caches before Clearing. 
If the caches are disabled * then the cache operations to clear the cache will be ignored */ jal enable_ID nop jal size_i_cache /* v0 = i-cache size, v1 = i-cache line size */ nop /* run uncached in kseg 1 */ la k0, 1f lui k1, 0x2000 or k0, k1, k0 jr k0 nop 1: /* * set K0 cache mode */ mfc0 t0, CP0_CONFIG and t0, t0, ~CP0_CONFIG_K0_MASK or t0, t0, 3 /* Write Back mode */ mtc0 t0, CP0_CONFIG /* * Initialize instruction cache. */ li a0, KSEG0 cacheop(a0, v0, v1, Index_Store_Tag_I) /* * Now we can run from I-$, kseg 0 */ la k0, 1f lui k1, 0x2000 or k0, k1, k0 xor k0, k1, k0 jr k0 nop 1: /* * Initialize data cache. */ jal size_d_cache /* v0 = d-cache size, v1 = d-cache line size */ nop li a0, KSEG0 cacheop(a0, v0, v1, Index_Store_Tag_D) jr t8 nop .end l1_init .set reorder /* * Function: set_other_config * Arguments: none * Returns: None * Description: initialize other remainder configuration to defaults. * Trashes: t0, t1 * * pseudo code: * */ LEAF(set_other_config) .set noreorder /* enable Bus error for I-fetch */ mfc0 t0, CP0_CACHEERR, 0 li t1, 0x4 or t0, t1 mtc0 t0, CP0_CACHEERR, 0 /* enable Bus error for Load */ mfc0 t0, CP0_CACHEERR, 1 li t1, 0x4 or t0, t1 mtc0 t0, CP0_CACHEERR, 1 /* enable Bus Error for Store */ mfc0 t0, CP0_CACHEERR, 2 li t1, 0x4 or t0, t1 mtc0 t0, CP0_CACHEERR, 2 jr ra nop .set reorder END(set_other_config) /* * Function: set_branch_pred * Arguments: none * Returns: None * Description: * Trashes: t0, t1 * * pseudo code: * */ LEAF(set_branch_pred) .set noreorder mfc0 t0, CP0_BRCM_MODE li t1, ~(CP0_BRCM_MODE_BrPRED_MASK | CP0_BRCM_MODE_BrHIST_MASK ) and t0, t0, t1 /* enable Branch prediction */ li t1, BRCM_BrPRED_BHT_ENABLE sll t1, CP0_BRCM_MODE_BrPRED_SHIFT or t0, t0, t1 /* set history count to 8 */ li t1, 8 sll t1, CP0_BRCM_MODE_BrHIST_SHIFT or t0, t0, t1 mtc0 t0, CP0_BRCM_MODE jr ra nop .set reorder END(set_branch_pred) /* * Function: set_luc * Arguments: set link uncached. * Returns: None * Description: * Trashes: t0, t1 * */ LEAF(set_luc) .set noreorder mfc0 t0, CP0_BRCM_MODE li t1, ~(CP0_BRCM_MODE_Luc_MASK) and t0, t0, t1 /* set Luc */ ori t0, t0, CP0_BRCM_MODE_Luc_MASK mtc0 t0, CP0_BRCM_MODE jr ra nop .set reorder END(set_luc) /* * Function: set_cwf_tse * Arguments: set CWF and TSE bits * Returns: None * Description: * Trashes: t0, t1 * */ LEAF(set_cwf_tse) .set noreorder mfc0 t0, CP0_BRCM_CONFIG0 li t1, (CP0_BRCM_CONFIG0_CWF_MASK | CP0_BRCM_CONFIG0_TSE_MASK) or t0, t0, t1 mtc0 t0, CP0_BRCM_CONFIG0 jr ra nop .set reorder END(set_cwf_tse) /* * Function: set_clock_ratio * Arguments: set clock ratio specified by a0 * Returns: None * Description: * Trashes: v0, v1, a0, a1 * * pseudo code: * */ LEAF(set_clock_ratio) .set noreorder mfc0 t0, CP0_BRCM_MODE li t1, ~(CP0_BRCM_MODE_SET_MASK | CP0_BRCM_MODE_ClkRATIO_MASK) and t0, t0, t1 li t1, CP0_BRCM_MODE_SET_MASK or t0, t0, t1 or t0, t0, a0 mtc0 t0, CP0_BRCM_MODE jr ra nop .set reorder END(set_clock_ratio) /* * Function: set_zephyr * Arguments: None * Returns: None * Description: Set any zephyr bits * Trashes: t0 & t1 * */ LEAF(set_zephyr) .set noreorder /* enable read/write of CP0 #22 sel. 
8 */ li t0, 0x5a455048 .word 0x4088b00f /* mtc0 t0, $22, 15 */ .word 0x4008b008 /* mfc0 t0, $22, 8 */ li t1, 0x09008000 /* turn off pref, jtb */ or t0, t0, t1 .word 0x4088b008 /* mtc0 t0, $22, 8 */ sync /* disable read/write of CP0 #22 sel 8 */ li t0, 0x0 .word 0x4088b00f /* mtc0 t0, $22, 15 */ jr ra nop .set reorder END(set_zephyr) /* * Function: set_llmb * Arguments: a0=0 disable llmb, a0=1 enables llmb * Returns: None * Description: * Trashes: t0, t1, t2 * * pseudo code: * */ LEAF(set_llmb) .set noreorder li t2, 0x90000000 | BRCM_ZSC_ALL_REGS_SELECT | BRCM_ZSC_CONFIG_REG sync cache 0x7, 0x0(t2) sync mfc0 t0, CP0_D_SEC_CACHE_DATA_LO li t1, ~(BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En) and t0, t0, t1 beqz a0, svlmb nop enable_lmb: li t1, (BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En) or t0, t0, t1 svlmb: mtc0 t0, CP0_D_SEC_CACHE_DATA_LO sync cache 0xb, 0x0(t2) sync jr ra nop .set reorder END(set_llmb) /* * Function: core_init * Arguments: none * Returns: None * Description: initialize core related configuration * Trashes: v0,v1,a0,a1,t8 * * pseudo code: * */ .globl core_init .ent core_init .set noreorder core_init: move t8, ra /* set Zephyr bits. */ bal set_zephyr nop /* set low latency memory bus */ li a0, 1 bal set_llmb nop /* set branch prediction (TP0 only) */ bal set_branch_pred nop /* set link uncached */ bal set_luc nop /* set CWF and TSE */ bal set_cwf_tse nop /* *set clock ratio by setting 1 to 'set' * and 0 to ClkRatio, (TP0 only) */ li a0, 0 bal set_clock_ratio nop /* set other configuration to defaults */ bal set_other_config nop move ra, t8 jr ra nop .set reorder .end core_init /* * Function: clear_jump_target_buffer * Arguments: None * Returns: None * Description: * Trashes: t0, t1, t2 * */ #define RESET_CALL_RETURN_STACK_THIS_THREAD (0x06<<16) #define RESET_JUMP_TARGET_BUFFER_THIS_THREAD (0x04<<16) #define JTB_CS_CNTL_MASK (0xFF<<16) .globl clear_jump_target_buffer .ent clear_jump_target_buffer .set noreorder clear_jump_target_buffer: mfc0 t0, $22, 2 nop nop li t1, ~JTB_CS_CNTL_MASK and t0, t0, t1 li t2, RESET_CALL_RETURN_STACK_THIS_THREAD or t0, t0, t2 mtc0 t0, $22, 2 nop nop and t0, t0, t1 li t2, RESET_JUMP_TARGET_BUFFER_THIS_THREAD or t0, t0, t2 mtc0 t0, $22, 2 nop nop jr ra nop .end clear_jump_target_buffer .set reorder /* * Function: bmips_cache_init * Arguments: None * Returns: None * Description: Enable I and D caches, and initialize I and D-caches * Trashes: v0, v1, t0, t1, t2, t5, t7, t8 * */ .globl bmips_5xxx_init .ent bmips_5xxx_init .set noreorder bmips_5xxx_init: /* save return address and A0 */ move t7, ra move t5, a0 jal l1_init nop jal core_init nop jal clear_jump_target_buffer nop mtc0 zero, CP0_CAUSE move a0, t5 jr t7 nop .end bmips_5xxx_init .set reorder #endif
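size_i_cache and size_d_cache decode the cache geometry from CP0 Config1 using the field definitions at the top of the file: sets per way = 64 << IS, line size = 2^(IL+1) (IL == 0 means no cache), associativity = IA + 1, and total size is the product of the three. A C sketch of the same decode, reusing the shift/mask constants from above:

```c
#include <stdint.h>

#define IS_SHIFT 22
#define IL_SHIFT 19
#define IA_SHIFT 16
#define FIELD_MASK 7   /* IS/IL/IA are all 3-bit fields */

/* Returns total I-cache size in bytes (0 if absent); line size via out-param. */
static unsigned icache_size(uint32_t config1, unsigned *line_size)
{
	unsigned is = (config1 >> IS_SHIFT) & FIELD_MASK;
	unsigned il = (config1 >> IL_SHIFT) & FIELD_MASK;
	unsigned ia = (config1 >> IA_SHIFT) & FIELD_MASK;

	if (il == 0) {                 /* IL == 0: no I-cache present */
		*line_size = 0;
		return 0;
	}
	*line_size = 1u << (il + 1);   /* line size = 2^(IL+1) */
	/* (sets per way) * (line size) * (associativity) */
	return (64u << is) * (1u << (il + 1)) * (ia + 1);
}
```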

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      1,546
file_path: arch/mips/kernel/r4k_switch.S
content:
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * Copyright (C) 1994, 1995, 1996, by Andreas Busse * Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 2000 MIPS Technologies, Inc. * written by Carsten Langgaard, carstenl@mips.com */ #include <asm/asm.h> #include <asm/cachectl.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/thread_info.h> #include <asm/asmmacro.h> /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) */ .align 5 LEAF(resume) mfc0 t1, CP0_STATUS LONG_S t1, THREAD_STATUS(a0) cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) LONG_S t9, 0(t8) #endif /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. */ move $28, a2 cpu_restore_nonscratch a1 PTR_ADDU t0, $28, _THREAD_SIZE - 32 set_saved_sp t0, t1, t2 mfc0 t1, CP0_STATUS /* Do we really need this? */ li a3, 0xff01 and t1, a3 LONG_L a2, THREAD_STATUS(a1) nor a3, $0, a3 and a2, a3 or a2, t1 mtc0 a2, CP0_STATUS move v0, a0 jr ra END(resume)
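The Status-register dance at the end of resume() splices two values: the interrupt-mask byte and the IE bit (the 0xff01 mask) come from the live CP0_STATUS, everything else from the status saved in the incoming task. The same splice as a C expression (the identical sequence appears in r2300_switch.S below):

```c
#include <stdint.h>

/* new CP0_STATUS = next task's saved status, with the live interrupt
 * mask (IM7..IM0, bits 15..8) and IE (bit 0) spliced in */
static uint32_t splice_status(uint32_t live, uint32_t next_saved)
{
	const uint32_t keep = 0xff01;

	return (next_saved & ~keep) | (live & keep);
}
```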

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      2,752
file_path: arch/mips/kernel/scall64-n64.S
content:
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2001 MIPS Technologies, Inc. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/irqflags.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/asm-offsets.h> #include <asm/sysmips.h> #include <asm/thread_info.h> #include <asm/unistd.h> #ifndef CONFIG_MIPS32_COMPAT /* Neither O32 nor N32, so define handle_sys here */ #define handle_sys64 handle_sys #endif .align 5 NESTED(handle_sys64, PT_SIZE, sp) #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) /* * When 32-bit compatibility is configured scall_o32.S * already did this. */ .set noat SAVE_SOME TRACE_IRQS_ON_RELOAD STI .set at #endif #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) ld t1, PT_EPC(sp) # skip syscall on return daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) #endif sd a3, PT_R26(sp) # save a3 for syscall restarting li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, syscall_trace_entry syscall_common: dsubu t2, v0, __NR_64_Linux sltiu t0, t2, __NR_64_Linux_syscalls beqz t0, illegal_syscall dsll t0, t2, 3 # offset into table dla t2, sys_call_table daddu t0, t2, t0 ld t2, (t0) # syscall routine beqz t2, illegal_syscall jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? sltu t0, t0, v0 sd t0, PT_R7(sp) # set error flag beqz t0, 1f ld t1, PT_R2(sp) # syscall number dnegu v0 # error sd t1, PT_R0(sp) # save it for syscall restarting 1: sd v0, PT_R2(sp) # result n64_syscall_exit: j syscall_exit_partial /* ------------------------------------------------------------------------ */ syscall_trace_entry: SAVE_STATIC move a0, sp move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall RESTORE_STATIC ld v0, PT_R2(sp) # Restore syscall (maybe modified) ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) ld a2, PT_R6(sp) ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) j syscall_common 1: j syscall_exit illegal_syscall: /* This also isn't a 64-bit syscall, throw an error. */ li v0, ENOSYS # error sd v0, PT_R2(sp) li t0, 1 # set error flag sd t0, PT_R7(sp) j n64_syscall_exit END(handle_sys64) #define __SYSCALL(nr, entry) PTR_WD entry .align 3 .type sys_call_table, @object EXPORT(sys_call_table) #include <asm/syscall_table_n64.h>
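The error-return convention after `jalr t2` is worth spelling out: a single unsigned compare classifies any v0 in [-EMAXERRNO, -1] as an error, which sets the saved R7 as an error flag and stores the positive errno in R2. A C sketch of that classification (EMAXERRNO's exact value is arch-specific; 1133 is assumed here):

```c
#include <stdint.h>

#define EMAXERRNO 1133   /* assumed: the largest valid errno on MIPS */

/* v0 in [-EMAXERRNO, -1]  <=>  (u64)v0 > (u64)(-EMAXERRNO - 1) */
static void store_syscall_result(int64_t v0, uint64_t *r2, uint64_t *r7)
{
	int error = (uint64_t)v0 > (uint64_t)(int64_t)(-EMAXERRNO - 1);

	*r7 = error;                    /* error flag, as in PT_R7 */
	*r2 = error ? (uint64_t)-v0     /* positive errno */
		    : (uint64_t)v0;     /* normal result */
}
```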

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      5,058
file_path: arch/mips/kernel/cps-vec-ns16550.S
content:
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2015 Imagination Technologies * Author: Paul Burton <paul.burton@mips.com> */ #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <linux/serial_reg.h> #define UART_TX_OFS (UART_TX << CONFIG_MIPS_CPS_NS16550_SHIFT) #define UART_LSR_OFS (UART_LSR << CONFIG_MIPS_CPS_NS16550_SHIFT) #if CONFIG_MIPS_CPS_NS16550_WIDTH == 1 # define UART_L lb # define UART_S sb #elif CONFIG_MIPS_CPS_NS16550_WIDTH == 2 # define UART_L lh # define UART_S sh #elif CONFIG_MIPS_CPS_NS16550_WIDTH == 4 # define UART_L lw # define UART_S sw #else # define UART_L lb # define UART_S sb #endif /** * _mips_cps_putc() - write a character to the UART * @a0: ASCII character to write * @t9: UART base address */ LEAF(_mips_cps_putc) 1: UART_L t0, UART_LSR_OFS(t9) andi t0, t0, UART_LSR_TEMT beqz t0, 1b UART_S a0, UART_TX_OFS(t9) jr ra END(_mips_cps_putc) /** * _mips_cps_puts() - write a string to the UART * @a0: pointer to NULL-terminated ASCII string * @t9: UART base address * * Write a null-terminated ASCII string to the UART. */ NESTED(_mips_cps_puts, 0, ra) move s7, ra move s6, a0 1: lb a0, 0(s6) beqz a0, 2f jal _mips_cps_putc PTR_ADDIU s6, s6, 1 b 1b 2: jr s7 END(_mips_cps_puts) /** * _mips_cps_putx4 - write a 4b hex value to the UART * @a0: the 4b value to write to the UART * @t9: UART base address * * Write a single hexadecimal character to the UART. */ NESTED(_mips_cps_putx4, 0, ra) andi a0, a0, 0xf li t0, '0' blt a0, 10, 1f li t0, 'a' addiu a0, a0, -10 1: addu a0, a0, t0 b _mips_cps_putc END(_mips_cps_putx4) /** * _mips_cps_putx8 - write an 8b hex value to the UART * @a0: the 8b value to write to the UART * @t9: UART base address * * Write an 8 bit value (ie. 2 hexadecimal characters) to the UART. */ NESTED(_mips_cps_putx8, 0, ra) move s3, ra move s2, a0 srl a0, a0, 4 jal _mips_cps_putx4 move a0, s2 move ra, s3 b _mips_cps_putx4 END(_mips_cps_putx8) /** * _mips_cps_putx16 - write a 16b hex value to the UART * @a0: the 16b value to write to the UART * @t9: UART base address * * Write a 16 bit value (ie. 4 hexadecimal characters) to the UART. */ NESTED(_mips_cps_putx16, 0, ra) move s5, ra move s4, a0 srl a0, a0, 8 jal _mips_cps_putx8 move a0, s4 move ra, s5 b _mips_cps_putx8 END(_mips_cps_putx16) /** * _mips_cps_putx32 - write a 32b hex value to the UART * @a0: the 32b value to write to the UART * @t9: UART base address * * Write a 32 bit value (ie. 8 hexadecimal characters) to the UART. */ NESTED(_mips_cps_putx32, 0, ra) move s7, ra move s6, a0 srl a0, a0, 16 jal _mips_cps_putx16 move a0, s6 move ra, s7 b _mips_cps_putx16 END(_mips_cps_putx32) #ifdef CONFIG_64BIT /** * _mips_cps_putx64 - write a 64b hex value to the UART * @a0: the 64b value to write to the UART * @t9: UART base address * * Write a 64 bit value (ie. 16 hexadecimal characters) to the UART. */ NESTED(_mips_cps_putx64, 0, ra) move sp, ra move s8, a0 dsrl32 a0, a0, 0 jal _mips_cps_putx32 move a0, s8 move ra, sp b _mips_cps_putx32 END(_mips_cps_putx64) #define _mips_cps_putxlong _mips_cps_putx64 #else /* !CONFIG_64BIT */ #define _mips_cps_putxlong _mips_cps_putx32 #endif /* !CONFIG_64BIT */ /** * mips_cps_bev_dump() - dump relevant exception state to UART * @a0: pointer to NULL-terminated ASCII string naming the exception * * Write information that may be useful in debugging an exception to the * UART configured by CONFIG_MIPS_CPS_NS16550_*. 
As this BEV exception * will only be run if something goes horribly wrong very early during * the bringup of a core and it is very likely to be unsafe to perform * memory accesses at that point (cache state indeterminate, EVA may not * be configured, coherence may be disabled) let alone have a stack, * this is all written in assembly using only registers & unmapped * uncached access to the UART registers. */ LEAF(mips_cps_bev_dump) move s0, ra move s1, a0 li t9, CKSEG1ADDR(CONFIG_MIPS_CPS_NS16550_BASE) PTR_LA a0, str_newline jal _mips_cps_puts PTR_LA a0, str_bev jal _mips_cps_puts move a0, s1 jal _mips_cps_puts PTR_LA a0, str_newline jal _mips_cps_puts PTR_LA a0, str_newline jal _mips_cps_puts #define DUMP_COP0_REG(reg, name, sz, _mfc0) \ PTR_LA a0, 8f; \ jal _mips_cps_puts; \ _mfc0 a0, reg; \ jal _mips_cps_putx##sz; \ PTR_LA a0, str_newline; \ jal _mips_cps_puts; \ TEXT(name) DUMP_COP0_REG(CP0_CAUSE, "Cause: 0x", 32, mfc0) DUMP_COP0_REG(CP0_STATUS, "Status: 0x", 32, mfc0) DUMP_COP0_REG(CP0_EBASE, "EBase: 0x", long, MFC0) DUMP_COP0_REG(CP0_BADVADDR, "BadVAddr: 0x", long, MFC0) DUMP_COP0_REG(CP0_BADINSTR, "BadInstr: 0x", 32, mfc0) PTR_LA a0, str_newline jal _mips_cps_puts jr s0 END(mips_cps_bev_dump) .pushsection .data str_bev: .asciiz "BEV Exception: " str_newline: .asciiz "\r\n" .popsection
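_mips_cps_putc is the primitive everything else here builds on: spin on LSR until the transmitter is empty, then store the byte to the TX register. A C sketch of the same poll-and-write, with the register shift fixed at 0 (the real code scales offsets by CONFIG_MIPS_CPS_NS16550_SHIFT and picks lb/lh/lw by ..._WIDTH):

```c
#include <stdint.h>

#define UART_TX       0      /* transmit holding register */
#define UART_LSR      5      /* line status register */
#define UART_LSR_TEMT 0x40   /* transmitter empty */

static void cps_putc(volatile uint8_t *uart_base, char c)
{
	/* wait until the FIFO and shift register have fully drained */
	while (!(uart_base[UART_LSR] & UART_LSR_TEMT))
		;
	uart_base[UART_TX] = (uint8_t)c;
}
```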

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      2,548
file_path: arch/mips/kernel/scall64-n32.S
content:
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2001 MIPS Technologies, Inc. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/irqflags.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/thread_info.h> #include <asm/unistd.h> #ifndef CONFIG_MIPS32_O32 /* No O32, so define handle_sys here */ #define handle_sysn32 handle_sys #endif .align 5 NESTED(handle_sysn32, PT_SIZE, sp) #ifndef CONFIG_MIPS32_O32 .set noat SAVE_SOME TRACE_IRQS_ON_RELOAD STI .set at #endif dsubu t0, v0, __NR_N32_Linux # check syscall number sltiu t0, t0, __NR_N32_Linux_syscalls #ifndef CONFIG_MIPS32_O32 ld t1, PT_EPC(sp) # skip syscall on return daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) #endif beqz t0, not_n32_scall sd a3, PT_R26(sp) # save a3 for syscall restarting li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, n32_syscall_trace_entry syscall_common: dsll t0, v0, 3 # offset into table ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0) jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? sltu t0, t0, v0 sd t0, PT_R7(sp) # set error flag beqz t0, 1f ld t1, PT_R2(sp) # syscall number dnegu v0 # error sd t1, PT_R0(sp) # save it for syscall restarting 1: sd v0, PT_R2(sp) # result j syscall_exit_partial /* ------------------------------------------------------------------------ */ n32_syscall_trace_entry: SAVE_STATIC move a0, sp move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall RESTORE_STATIC ld v0, PT_R2(sp) # Restore syscall (maybe modified) ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) ld a2, PT_R6(sp) ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) dsubu t2, v0, __NR_N32_Linux # check (new) syscall number sltiu t0, t2, __NR_N32_Linux_syscalls beqz t0, not_n32_scall j syscall_common 1: j syscall_exit not_n32_scall: /* This is not an n32 compatibility syscall, pass it on to the n64 syscall handlers. */ j handle_sys64 END(handle_sysn32) #define __SYSCALL(nr, entry) PTR_WD entry .type sysn32_call_table, @object EXPORT(sysn32_call_table) #include <asm/syscall_table_n32.h>
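One detail that is easy to miss in syscall_common: the load `ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0)` folds the table's base bias into a link-time displacement, so only `v0 * 8` is computed at run time. In C terms (the table length here is an assumed placeholder):

```c
#define __NR_N32_Linux          6000   /* first n32 syscall number */
#define __NR_N32_Linux_syscalls  400   /* assumed table length */

extern long (*const sysn32_call_table[])(void);

static long (*n32_lookup(unsigned long nr))(void)
{
	/* unsigned subtract-and-compare, as dsubu + sltiu do above */
	if (nr - __NR_N32_Linux >= __NR_N32_Linux_syscalls)
		return 0;   /* not an n32 syscall: falls through to n64 */
	/* the "- __NR_N32_Linux" is free: it lives in the ld displacement */
	return sysn32_call_table[nr - __NR_N32_Linux];
}
```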

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      1,495
file_path: arch/mips/kernel/r2300_switch.S
content:
/* SPDX-License-Identifier: GPL-2.0 */ /* * r2300_switch.S: R2300 specific task switching code. * * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle * Copyright (C) 1994, 1995, 1996 by Andreas Busse * * Multi-cpu abstraction and macros for easier reading: * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * * Further modifications to make this work: * Copyright (c) 1998-2000 Harald Koerfgen */ #include <asm/asm.h> #include <asm/cachectl.h> #include <asm/export.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/thread_info.h> #include <asm/asmmacro.h> .set mips1 .align 5 /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) */ LEAF(resume) mfc0 t1, CP0_STATUS sw t1, THREAD_STATUS(a0) cpu_save_nonscratch a0 sw ra, THREAD_REG31(a0) #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) LONG_S t9, 0(t8) #endif /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. */ move $28, a2 cpu_restore_nonscratch a1 addiu t1, $28, _THREAD_SIZE - 32 sw t1, kernelsp mfc0 t1, CP0_STATUS /* Do we really need this? */ li a3, 0xff01 and t1, a3 lw a2, THREAD_STATUS(a1) nor a3, $0, a3 and a2, a3 or a2, t1 mtc0 a2, CP0_STATUS move v0, a0 jr ra END(resume)

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      5,267
file_path: arch/mips/kernel/scall64-o32.S
content:
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995 - 2000, 2001 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2001 MIPS Technologies, Inc. * Copyright (C) 2004 Thiemo Seufer * * Hairy, the userspace application uses a different argument passing * convention than the kernel, so we have to translate things from o32 * to ABI64 calling convention. 64-bit syscalls are also processed * here for now. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/irqflags.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/sysmips.h> .align 5 NESTED(handle_sys, PT_SIZE, sp) .set noat SAVE_SOME TRACE_IRQS_ON_RELOAD STI .set at ld t1, PT_EPC(sp) # skip syscall on return dsubu t0, v0, __NR_O32_Linux # check syscall number sltiu t0, t0, __NR_O32_Linux_syscalls daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) beqz t0, not_o32_scall #if 0 SAVE_ALL move a1, v0 ASM_PRINT("Scall %ld\n") RESTORE_ALL #endif /* We don't want to stumble over broken sign extensions from userland. O32 does never use the upper half. */ sll a0, a0, 0 sll a1, a1, 0 sll a2, a2, 0 sll a3, a3, 0 sd a3, PT_R26(sp) # save a3 for syscall restarting /* * More than four arguments. Try to deal with it by copying the * stack arguments from the user stack to the kernel stack. * This Sucks (TM). * * We intentionally keep the kernel stack a little below the top of * userspace so we don't have to do a slower byte accurate check here. */ ld t0, PT_R29(sp) # get old user stack pointer daddu t1, t0, 32 bltz t1, bad_stack load_a4: lw a4, 16(t0) # argument #5 from usp load_a5: lw a5, 20(t0) # argument #6 from usp load_a6: lw a6, 24(t0) # argument #7 from usp load_a7: lw a7, 28(t0) # argument #8 from usp loads_done: .section __ex_table,"a" PTR_WD load_a4, bad_stack_a4 PTR_WD load_a5, bad_stack_a5 PTR_WD load_a6, bad_stack_a6 PTR_WD load_a7, bad_stack_a7 .previous li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, trace_a_syscall syscall_common: dsll t0, v0, 3 # offset into table ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0) jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? sltu t0, t0, v0 sd t0, PT_R7(sp) # set error flag beqz t0, 1f ld t1, PT_R2(sp) # syscall number dnegu v0 # error sd t1, PT_R0(sp) # save it for syscall restarting 1: sd v0, PT_R2(sp) # result o32_syscall_exit: j syscall_exit_partial /* ------------------------------------------------------------------------ */ trace_a_syscall: SAVE_STATIC sd a4, PT_R8(sp) # Save argument registers sd a5, PT_R9(sp) sd a6, PT_R10(sp) sd a7, PT_R11(sp) # For indirect syscalls move a0, sp /* * absolute syscall number is in v0 unless we called syscall(__NR_###) * where the real syscall number is in a0 * note: NR_syscall is the first O32 syscall but the macro is * only defined when compiling with -mabi=32 (CONFIG_32BIT) * therefore __NR_O32_Linux is used (4000) */ .set push .set reorder subu t1, v0, __NR_O32_Linux move a1, v0 bnez t1, 1f /* __NR_syscall at offset 0 */ ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ .set pop 1: jal syscall_trace_enter bltz v0, 1f # seccomp failed? 
Skip syscall RESTORE_STATIC ld v0, PT_R2(sp) # Restore syscall (maybe modified) ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) ld a2, PT_R6(sp) ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) ld a6, PT_R10(sp) ld a7, PT_R11(sp) # For indirect syscalls dsubu t0, v0, __NR_O32_Linux # check (new) syscall number sltiu t0, t0, __NR_O32_Linux_syscalls beqz t0, not_o32_scall j syscall_common 1: j syscall_exit /* ------------------------------------------------------------------------ */ /* * The stackpointer for a call with more than 4 arguments is bad. */ bad_stack: li v0, EFAULT sd v0, PT_R2(sp) li t0, 1 # set error flag sd t0, PT_R7(sp) j o32_syscall_exit bad_stack_a4: li a4, 0 b load_a5 bad_stack_a5: li a5, 0 b load_a6 bad_stack_a6: li a6, 0 b load_a7 bad_stack_a7: li a7, 0 b loads_done not_o32_scall: /* * This is not an o32 compatibility syscall, pass it on * to the 64-bit syscall handlers. */ #ifdef CONFIG_MIPS32_N32 j handle_sysn32 #else j handle_sys64 #endif END(handle_sys) LEAF(sys32_syscall) subu t0, a0, __NR_O32_Linux # check syscall number sltiu v0, t0, __NR_O32_Linux_syscalls beqz t0, einval # do not recurse dsll t1, t0, 3 beqz v0, einval ld t2, sys32_call_table(t1) # syscall routine move a0, a1 # shift argument registers move a1, a2 move a2, a3 move a3, a4 move a4, a5 move a5, a6 move a6, a7 jr t2 /* Unreached */ einval: li v0, -ENOSYS jr ra END(sys32_syscall) #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #define __SYSCALL(nr, entry) PTR_WD entry .align 3 .type sys32_call_table,@object EXPORT(sys32_call_table) #include <asm/syscall_table_o32.h>
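The four `sll reg, reg, 0` instructions near the top are the canonical MIPS64 idiom for re-normalizing a register: keep the low 32 bits and sign-extend, discarding whatever an o32 caller left in the upper half. The C equivalent applied to each argument:

```c
#include <stdint.h>

/* "sll a0, a0, 0" on MIPS64: truncate to 32 bits, then sign-extend */
static int64_t o32_normalize(uint64_t raw_reg)
{
	return (int64_t)(int32_t)raw_reg;
}
```

Without this, a 64-bit syscall body could observe garbage in bits 63:32 of a 32-bit argument.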

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      4,466
file_path: arch/mips/kernel/mcount.S
content:
/* * MIPS specific _mcount support * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive for * more details. * * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China * Copyright (C) 2010 DSLab, Lanzhou University, China * Author: Wu Zhangjin <wuzhangjin@gmail.com> */ #include <asm/export.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/ftrace.h> .text .set noreorder .set noat .macro MCOUNT_SAVE_REGS PTR_SUBU sp, PT_SIZE PTR_S ra, PT_R31(sp) PTR_S AT, PT_R1(sp) PTR_S a0, PT_R4(sp) PTR_S a1, PT_R5(sp) PTR_S a2, PT_R6(sp) PTR_S a3, PT_R7(sp) #ifdef CONFIG_64BIT PTR_S a4, PT_R8(sp) PTR_S a5, PT_R9(sp) PTR_S a6, PT_R10(sp) PTR_S a7, PT_R11(sp) #endif .endm .macro MCOUNT_RESTORE_REGS PTR_L ra, PT_R31(sp) PTR_L AT, PT_R1(sp) PTR_L a0, PT_R4(sp) PTR_L a1, PT_R5(sp) PTR_L a2, PT_R6(sp) PTR_L a3, PT_R7(sp) #ifdef CONFIG_64BIT PTR_L a4, PT_R8(sp) PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) #endif PTR_ADDIU sp, PT_SIZE .endm .macro RETURN_BACK jr ra move ra, AT .endm /* * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass * the location of the parent's return address. */ #define MCOUNT_RA_ADDRESS_REG $12 #ifdef CONFIG_DYNAMIC_FTRACE NESTED(ftrace_caller, PT_SIZE, ra) .globl _mcount _mcount: EXPORT_SYMBOL(_mcount) b ftrace_stub #ifdef CONFIG_32BIT addiu sp,sp,8 #else nop #endif /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ MCOUNT_SAVE_REGS #ifdef KBUILD_MCOUNT_RA_ADDRESS PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp) #endif PTR_SUBU a0, ra, 8 /* arg1: self address */ PTR_LA t1, _stext sltu t2, a0, t1 /* t2 = (a0 < _stext) */ PTR_LA t1, _etext sltu t3, t1, a0 /* t3 = (a0 > _etext) */ or t1, t2, t3 beqz t1, ftrace_call nop #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) PTR_SUBU a0, a0, 16 /* arg1: adjust to module's recorded callsite */ #else PTR_SUBU a0, a0, 12 #endif .globl ftrace_call ftrace_call: nop /* a placeholder for the call to a real tracing function */ move a1, AT /* arg2: parent's return address */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: nop nop #endif MCOUNT_RESTORE_REGS .globl ftrace_stub ftrace_stub: RETURN_BACK END(ftrace_caller) #else /* ! CONFIG_DYNAMIC_FTRACE */ NESTED(_mcount, PT_SIZE, ra) EXPORT_SYMBOL(_mcount) PTR_LA t1, ftrace_stub PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ beq t1, t2, fgraph_trace nop MCOUNT_SAVE_REGS move a0, ra /* arg1: self return address */ jalr t2 /* (1) call *ftrace_trace_function */ move a1, AT /* arg2: parent's return address */ MCOUNT_RESTORE_REGS fgraph_trace: #ifdef CONFIG_FUNCTION_GRAPH_TRACER PTR_LA t1, ftrace_stub PTR_L t3, ftrace_graph_return bne t1, t3, ftrace_graph_caller nop PTR_LA t1, ftrace_graph_entry_stub PTR_L t3, ftrace_graph_entry bne t1, t3, ftrace_graph_caller nop #endif #ifdef CONFIG_32BIT addiu sp, sp, 8 #endif .globl ftrace_stub ftrace_stub: RETURN_BACK END(_mcount) #endif /* ! 
CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER NESTED(ftrace_graph_caller, PT_SIZE, ra) #ifndef CONFIG_DYNAMIC_FTRACE MCOUNT_SAVE_REGS #endif /* arg1: Get the location of the parent's return address */ #ifdef KBUILD_MCOUNT_RA_ADDRESS #ifdef CONFIG_DYNAMIC_FTRACE PTR_L a0, PT_R12(sp) #else move a0, MCOUNT_RA_ADDRESS_REG #endif bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */ nop #endif PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */ 1: /* arg2: Get self return address */ #ifdef CONFIG_DYNAMIC_FTRACE PTR_L a1, PT_R31(sp) #else move a1, ra #endif /* arg3: Get frame pointer of current stack */ #ifdef CONFIG_64BIT PTR_LA a2, PT_SIZE(sp) #else PTR_LA a2, (PT_SIZE+8)(sp) #endif jal prepare_ftrace_return nop MCOUNT_RESTORE_REGS #ifndef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_32BIT addiu sp, sp, 8 #endif #endif RETURN_BACK END(ftrace_graph_caller) .align 2 .globl return_to_handler return_to_handler: PTR_SUBU sp, PT_SIZE PTR_S v0, PT_R2(sp) jal ftrace_return_to_handler PTR_S v1, PT_R3(sp) /* restore the real parent address: v0 -> ra */ move ra, v0 PTR_L v0, PT_R2(sp) PTR_L v1, PT_R3(sp) jr ra PTR_ADDIU sp, PT_SIZE #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ .set at .set reorder
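In ftrace_caller, the traced callsite is derived from the return address and then adjusted only for module code: addresses inside [_stext, _etext] belong to the kernel image and are used as ra - 8, anything outside gets a further fixup. A C sketch of that classification (the 12 vs. 16 byte fixup mirrors the KBUILD_MCOUNT_RA_ADDRESS/CONFIG_32BIT #ifdef above):

```c
extern char _stext[], _etext[];   /* linker-provided kernel text bounds */

static unsigned long ftrace_callsite(unsigned long ra, int ra_addr_32bit)
{
	unsigned long addr = ra - 8;   /* arg1: self address */

	/* kernel image text needs no further adjustment */
	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext)
		return addr;
	/* module callsite: adjust to the recorded mcount location */
	return addr - (ra_addr_32bit ? 16 : 12);
}
```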

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      5,038
file_path: arch/mips/kernel/vmlinux.lds.S
content:
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/asm-offsets.h> #include <asm/thread_info.h> #define PAGE_SIZE _PAGE_SIZE /* * Put .bss..swapper_pg_dir as the first thing in .bss. This will * ensure that it has .bss alignment (64K). */ #define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) /* Cavium Octeon should not have a separate PT_NOTE Program Header. */ #ifndef CONFIG_CAVIUM_OCTEON_SOC #define EMITS_PT_NOTE #endif #define RUNTIME_DISCARD_EXIT #include <asm-generic/vmlinux.lds.h> #undef mips #define mips mips OUTPUT_ARCH(mips) ENTRY(kernel_entry) PHDRS { text PT_LOAD FLAGS(7); /* RWX */ #ifndef CONFIG_CAVIUM_OCTEON_SOC note PT_NOTE FLAGS(4); /* R__ */ #endif /* CAVIUM_OCTEON_SOC */ } #ifdef CONFIG_32BIT #ifdef CONFIG_CPU_LITTLE_ENDIAN jiffies = jiffies_64; #else jiffies = jiffies_64 + 4; #endif #else jiffies = jiffies_64; #endif SECTIONS { #ifdef CONFIG_BOOT_ELF64 /* Read-only sections, merged into text segment: */ /* . = 0xc000000000000000; */ /* This is the value for an Origin kernel, taken from an IRIX kernel. */ /* . = 0xc00000000001c000; */ /* Set the vaddr for the text segment to a value * >= 0xa800 0000 0001 9000 if no symmon is going to configured * >= 0xa800 0000 0030 0000 otherwise */ /* . = 0xa800000000300000; */ . = 0xffffffff80300000; #endif . = LINKER_LOAD_ADDRESS; /* read-only */ _text = .; /* Text and read-only data */ .text : { TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) . = ALIGN(16); *(.got) /* Global offset table */ } :text = 0 _etext = .; /* End of text section */ EXCEPTION_TABLE(16) /* Exception table for data bus errors */ __dbe_table : { __start___dbe_table = .; KEEP(*(__dbe_table)) __stop___dbe_table = .; } _sdata = .; /* Start of data section */ RO_DATA(4096) /* writeable */ .data : { /* Data */ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ INIT_TASK_DATA(THREAD_SIZE) NOSAVE_DATA PAGE_ALIGNED_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) DATA_DATA CONSTRUCTORS } BUG_TABLE _gp = . + 0x8000; .lit8 : { *(.lit8) } .lit4 : { *(.lit4) } /* We want the small data sections together, so single-instruction offsets can access them all, and initialized data all before uninitialized, so we can shorten the on-disk segment size. */ .sdata : { *(.sdata) } _edata = .; /* End of data section */ /* will be freed after init */ . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) . = ALIGN(4); .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) { __mips_machines_start = .; KEEP(*(.mips.machines.init)) __mips_machines_end = .; } /* .exit.text is discarded at runtime, not link time, to deal with * references from .rodata */ .exit.text : { EXIT_TEXT } .exit.data : { EXIT_DATA } #ifdef CONFIG_SMP PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT) #endif .rel.dyn : ALIGN(8) { *(.rel) *(.rel*) } #ifdef CONFIG_MIPS_ELF_APPENDED_DTB STRUCT_ALIGN(); .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) { *(.appended_dtb) KEEP(*(.appended_dtb)) } #endif #ifdef CONFIG_RELOCATABLE . = ALIGN(4); .data.reloc : { _relocation_start = .; /* * Space for relocation table * This needs to be filled so that the * relocs tool can overwrite the content. * An invalid value is left at the start of the * section to abort relocation if the table * has not been filled in. */ LONG(0xFFFFFFFF); FILL(0); . 
+= CONFIG_RELOCATION_TABLE_SIZE - 4; _relocation_end = .; } #endif #ifdef CONFIG_MIPS_RAW_APPENDED_DTB .fill : { FILL(0); BYTE(0); STRUCT_ALIGN(); } __appended_dtb = .; /* leave space for appended DTB */ . += 0x100000; #endif /* * Align to 64K in attempt to eliminate holes before the * .bss..swapper_pg_dir section at the start of .bss. This * also satisfies PAGE_SIZE alignment as the largest page size * allowed is 64K. */ . = ALIGN(0x10000); __init_end = .; /* freed after init ends here */ /* * Force .bss to 64K alignment so that .bss..swapper_pg_dir * gets that alignment. .sbss should be empty, so there will be * no holes after __init_end. */ BSS_SECTION(0, 0x10000, 8) _end = . ; /* These mark the ABI of the kernel for debuggers. */ .mdebug.abi32 : { KEEP(*(.mdebug.abi32)) } .mdebug.abi64 : { KEEP(*(.mdebug.abi64)) } /* This is the MIPS specific mdebug section. */ .mdebug : { *(.mdebug) } STABS_DEBUG DWARF_DEBUG ELF_DETAILS /* These must appear regardless of . */ .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) } .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) } /* Sections to be discarded */ DISCARDS /DISCARD/ : { /* ABI crap starts here */ *(.MIPS.abiflags) *(.MIPS.options) *(.gnu.attributes) *(.options) *(.pdr) *(.reginfo) } }
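The `jiffies = jiffies_64 + 4` alias for 32-bit big-endian kernels is the linker-script way of pointing the 32-bit symbol at the low-order word of the 64-bit counter (offset 0 on little-endian, offset 4 on big-endian). The same selection written out in C:

```c
#include <stdint.h>

/* which 32-bit half of jiffies_64 holds the low-order word */
static uint32_t jiffies_low_word(const uint64_t *jiffies_64, int big_endian)
{
	const uint32_t *halves = (const uint32_t *)jiffies_64;

	return halves[big_endian ? 1 : 0];
}
```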

repo_id:   aixcc-public/challenge-001-exemplar-source
size:      14,543
file_path: arch/mips/kernel/genex.S
content:
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2002, 2007 Maciej W. Rozycki * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved. */ #include <linux/init.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> #include <asm/irqflags.h> #include <asm/regdef.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/sync.h> #include <asm/thread_info.h> __INIT /* * General exception vector for all other CPUs. * * Be careful when changing this, it has to be at most 128 bytes * to fit into space reserved for the exception handler. */ NESTED(except_vec3_generic, 0, sp) .set push .set noat mfc0 k1, CP0_CAUSE andi k1, k1, 0x7c #ifdef CONFIG_64BIT dsll k1, k1, 1 #endif PTR_L k0, exception_handlers(k1) jr k0 .set pop END(except_vec3_generic) /* * General exception handler for CPUs with virtual coherency exception. * * Be careful when changing this, it has to be at most 256 (as a special * exception) bytes to fit into space reserved for the exception handler. */ NESTED(except_vec3_r4000, 0, sp) .set push .set arch=r4000 .set noat mfc0 k1, CP0_CAUSE li k0, 31<<2 andi k1, k1, 0x7c .set push .set noreorder .set nomacro beq k1, k0, handle_vced li k0, 14<<2 beq k1, k0, handle_vcei #ifdef CONFIG_64BIT dsll k1, k1, 1 #endif .set pop PTR_L k0, exception_handlers(k1) jr k0 /* * Big shit, we now may have two dirty primary cache lines for the same * physical address. We can safely invalidate the line pointed to by * c0_badvaddr because after return from this exception handler the * load / store will be re-executed. */ handle_vced: MFC0 k0, CP0_BADVADDR li k1, -4 # Is this ... and k0, k1 # ... really needed? mtc0 zero, CP0_TAGLO cache Index_Store_Tag_D, (k0) cache Hit_Writeback_Inv_SD, (k0) #ifdef CONFIG_PROC_FS PTR_LA k0, vced_count lw k1, (k0) addiu k1, 1 sw k1, (k0) #endif eret handle_vcei: MFC0 k0, CP0_BADVADDR cache Hit_Writeback_Inv_SD, (k0) # also cleans pi #ifdef CONFIG_PROC_FS PTR_LA k0, vcei_count lw k1, (k0) addiu k1, 1 sw k1, (k0) #endif eret .set pop END(except_vec3_r4000) __FINIT .align 5 /* 32 byte rollback region */ LEAF(__r4k_wait) .set push .set noreorder /* start of rollback region */ LONG_L t0, TI_FLAGS($28) nop andi t0, _TIF_NEED_RESCHED bnez t0, 1f nop nop nop #ifdef CONFIG_CPU_MICROMIPS nop nop nop nop #endif .set MIPS_ISA_ARCH_LEVEL_RAW wait /* end of rollback region (the region size must be power of two) */ 1: jr ra nop .set pop END(__r4k_wait) .macro BUILD_ROLLBACK_PROLOGUE handler FEXPORT(rollback_\handler) .set push .set noat MFC0 k0, CP0_EPC PTR_LA k1, __r4k_wait ori k0, 0x1f /* 32 byte rollback region */ xori k0, 0x1f bne k0, k1, \handler MTC0 k0, CP0_EPC .set pop .endm .align 5 BUILD_ROLLBACK_PROLOGUE handle_int NESTED(handle_int, PT_SIZE, sp) .cfi_signal_frame #ifdef CONFIG_TRACE_IRQFLAGS /* * Check to see if the interrupted code has just disabled * interrupts and ignore this interrupt for now if so. * * local_irq_disable() disables interrupts and then calls * trace_hardirqs_off() to track the state. 
If an interrupt is taken * after interrupts are disabled but before the state is updated * it will appear to restore_all that it is incorrectly returning with * interrupts disabled */ .set push .set noat mfc0 k0, CP0_STATUS #if defined(CONFIG_CPU_R3000) and k0, ST0_IEP bnez k0, 1f mfc0 k0, CP0_EPC .set noreorder j k0 rfe #else and k0, ST0_IE bnez k0, 1f eret #endif 1: .set pop #endif SAVE_ALL docfi=1 CLI TRACE_IRQS_OFF LONG_L s0, TI_REGS($28) LONG_S sp, TI_REGS($28) /* * SAVE_ALL ensures we are using a valid kernel stack for the thread. * Check if we are already using the IRQ stack. */ move s1, sp # Preserve the sp /* Get IRQ stack for this CPU */ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) lui k1, %hi(irq_stack) #else lui k1, %highest(irq_stack) daddiu k1, %higher(irq_stack) dsll k1, 16 daddiu k1, %hi(irq_stack) dsll k1, 16 #endif LONG_SRL k0, SMP_CPUID_PTRSHIFT LONG_ADDU k1, k0 LONG_L t0, %lo(irq_stack)(k1) # Check if already on IRQ stack PTR_LI t1, ~(_THREAD_SIZE-1) and t1, t1, sp beq t0, t1, 2f /* Switch to IRQ stack */ li t1, _IRQ_STACK_START PTR_ADD sp, t0, t1 /* Save task's sp on IRQ stack so that unwinding can follow it */ LONG_S s1, 0(sp) 2: jal plat_irq_dispatch /* Restore sp */ move sp, s1 j ret_from_irq #ifdef CONFIG_CPU_MICROMIPS nop #endif END(handle_int) __INIT /* * Special interrupt vector for MIPS64 ISA & embedded MIPS processors. * This is a dedicated interrupt exception vector which reduces the * interrupt processing overhead. The jump instruction will be replaced * at the initialization time. * * Be careful when changing this, it has to be at most 128 bytes * to fit into space reserved for the exception handler. */ NESTED(except_vec4, 0, sp) 1: j 1b /* Dummy, will be replaced */ END(except_vec4) /* * EJTAG debug exception handler. * The EJTAG debug exception entry point is 0xbfc00480, which * normally is in the boot PROM, so the boot PROM must do an * unconditional jump to this vector. */ NESTED(except_vec_ejtag_debug, 0, sp) j ejtag_debug_handler #ifdef CONFIG_CPU_MICROMIPS nop #endif END(except_vec_ejtag_debug) __FINIT /* * Vectored interrupt handler. * This prototype is copied to ebase + n*IntCtl.VS and patched * to invoke the handler */ BUILD_ROLLBACK_PROLOGUE except_vec_vi NESTED(except_vec_vi, 0, sp) SAVE_SOME docfi=1 SAVE_AT docfi=1 .set push .set noreorder PTR_LA v1, except_vec_vi_handler FEXPORT(except_vec_vi_lui) lui v0, 0 /* Patched */ jr v1 FEXPORT(except_vec_vi_ori) ori v0, 0 /* Patched */ .set pop END(except_vec_vi) EXPORT(except_vec_vi_end) /* * Common Vectored Interrupt code * Complete the register saves and invoke the handler which is passed in $v0 */ NESTED(except_vec_vi_handler, 0, sp) SAVE_TEMP SAVE_STATIC CLI #ifdef CONFIG_TRACE_IRQFLAGS move s0, v0 TRACE_IRQS_OFF move v0, s0 #endif LONG_L s0, TI_REGS($28) LONG_S sp, TI_REGS($28) /* * SAVE_ALL ensures we are using a valid kernel stack for the thread. * Check if we are already using the IRQ stack. 
*/ move s1, sp # Preserve the sp /* Get IRQ stack for this CPU */ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) lui k1, %hi(irq_stack) #else lui k1, %highest(irq_stack) daddiu k1, %higher(irq_stack) dsll k1, 16 daddiu k1, %hi(irq_stack) dsll k1, 16 #endif LONG_SRL k0, SMP_CPUID_PTRSHIFT LONG_ADDU k1, k0 LONG_L t0, %lo(irq_stack)(k1) # Check if already on IRQ stack PTR_LI t1, ~(_THREAD_SIZE-1) and t1, t1, sp beq t0, t1, 2f /* Switch to IRQ stack */ li t1, _IRQ_STACK_START PTR_ADD sp, t0, t1 /* Save task's sp on IRQ stack so that unwinding can follow it */ LONG_S s1, 0(sp) 2: jalr v0 /* Restore sp */ move sp, s1 j ret_from_irq END(except_vec_vi_handler) /* * EJTAG debug exception handler. */ NESTED(ejtag_debug_handler, PT_SIZE, sp) .set push .set noat MTC0 k0, CP0_DESAVE mfc0 k0, CP0_DEBUG andi k0, k0, MIPS_DEBUG_DBP # Check for SDBBP. beqz k0, ejtag_return #ifdef CONFIG_SMP 1: PTR_LA k0, ejtag_debug_buffer_spinlock __SYNC(full, loongson3_war) 2: ll k0, 0(k0) bnez k0, 2b PTR_LA k0, ejtag_debug_buffer_spinlock sc k0, 0(k0) beqz k0, 1b # ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC sync # endif PTR_LA k0, ejtag_debug_buffer LONG_S k1, 0(k0) ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG PTR_SRL k1, SMP_CPUID_PTRSHIFT PTR_SLL k1, LONGLOG PTR_LA k0, ejtag_debug_buffer_per_cpu PTR_ADDU k0, k1 PTR_LA k1, ejtag_debug_buffer LONG_L k1, 0(k1) LONG_S k1, 0(k0) PTR_LA k0, ejtag_debug_buffer_spinlock sw zero, 0(k0) #else PTR_LA k0, ejtag_debug_buffer LONG_S k1, 0(k0) #endif SAVE_ALL move a0, sp jal ejtag_exception_handler RESTORE_ALL #ifdef CONFIG_SMP ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG PTR_SRL k1, SMP_CPUID_PTRSHIFT PTR_SLL k1, LONGLOG PTR_LA k0, ejtag_debug_buffer_per_cpu PTR_ADDU k0, k1 LONG_L k1, 0(k0) #else PTR_LA k0, ejtag_debug_buffer LONG_L k1, 0(k0) #endif ejtag_return: back_to_back_c0_hazard MFC0 k0, CP0_DESAVE .set mips32 deret .set pop END(ejtag_debug_handler) /* * This buffer is reserved for the use of the EJTAG debug * handler. */ .data EXPORT(ejtag_debug_buffer) .fill LONGSIZE #ifdef CONFIG_SMP EXPORT(ejtag_debug_buffer_spinlock) .fill LONGSIZE EXPORT(ejtag_debug_buffer_per_cpu) .fill LONGSIZE * NR_CPUS #endif .previous __INIT /* * NMI debug exception handler for MIPS reference boards. * The NMI debug exception entry point is 0xbfc00000, which * normally is in the boot PROM, so the boot PROM must do a * unconditional jump to this vector. */ NESTED(except_vec_nmi, 0, sp) j nmi_handler #ifdef CONFIG_CPU_MICROMIPS nop #endif END(except_vec_nmi) __FINIT NESTED(nmi_handler, PT_SIZE, sp) .cfi_signal_frame .set push .set noat /* * Clear ERL - restore segment mapping * Clear BEV - required for page fault exception handler to work */ mfc0 k0, CP0_STATUS ori k0, k0, ST0_EXL li k1, ~(ST0_BEV | ST0_ERL) and k0, k0, k1 mtc0 k0, CP0_STATUS _ehb SAVE_ALL move a0, sp jal nmi_exception_handler /* nmi_exception_handler never returns */ .set pop END(nmi_handler) .macro __build_clear_none .endm .macro __build_clear_sti TRACE_IRQS_ON STI .endm .macro __build_clear_cli CLI TRACE_IRQS_OFF .endm .macro __build_clear_fpe CLI TRACE_IRQS_OFF .set push /* gas fails to assemble cfc1 for some archs (octeon).*/ \ .set mips1 SET_HARDFLOAT cfc1 a1, fcr31 .set pop .endm .macro __build_clear_msa_fpe CLI TRACE_IRQS_OFF _cfcmsa a1, MSA_CSR .endm .macro __build_clear_ade MFC0 t0, CP0_BADVADDR PTR_S t0, PT_BVADDR(sp) KMODE .endm .macro __build_clear_gsexc .set push /* * We need to specify a selector to access the CP0.Diag1 (GSCause) * register. All GSExc-equipped processors have MIPS32. 
*/ .set mips32 mfc0 a1, CP0_DIAGNOSTIC1 .set pop TRACE_IRQS_ON STI .endm .macro __BUILD_silent exception .endm /* Gas tries to parse the ASM_PRINT argument as a string containing string escapes and emits bogus warnings if it believes to recognize an unknown escape code. So make the arguments start with an n and gas will believe \n is ok ... */ .macro __BUILD_verbose nexception LONG_L a1, PT_EPC(sp) #ifdef CONFIG_32BIT ASM_PRINT("Got \nexception at %08lx\012") #endif #ifdef CONFIG_64BIT ASM_PRINT("Got \nexception at %016lx\012") #endif .endm .macro __BUILD_count exception LONG_L t0,exception_count_\exception LONG_ADDIU t0, 1 LONG_S t0,exception_count_\exception .comm exception_count\exception, 8, 8 .endm .macro __BUILD_HANDLER exception handler clear verbose ext .align 5 NESTED(handle_\exception, PT_SIZE, sp) .cfi_signal_frame .set noat SAVE_ALL FEXPORT(handle_\exception\ext) __build_clear_\clear .set at __BUILD_\verbose \exception move a0, sp jal do_\handler j ret_from_exception END(handle_\exception) .endm .macro BUILD_HANDLER exception handler clear verbose __BUILD_HANDLER \exception \handler \clear \verbose _int .endm BUILD_HANDLER adel ade ade silent /* #4 */ BUILD_HANDLER ades ade ade silent /* #5 */ BUILD_HANDLER ibe be cli silent /* #6 */ BUILD_HANDLER dbe be cli silent /* #7 */ BUILD_HANDLER bp bp sti silent /* #9 */ BUILD_HANDLER ri ri sti silent /* #10 */ BUILD_HANDLER cpu cpu sti silent /* #11 */ BUILD_HANDLER ov ov sti silent /* #12 */ BUILD_HANDLER tr tr sti silent /* #13 */ BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */ #ifdef CONFIG_MIPS_FP_SUPPORT BUILD_HANDLER fpe fpe fpe silent /* #15 */ #endif BUILD_HANDLER ftlb ftlb none silent /* #16 */ BUILD_HANDLER gsexc gsexc gsexc silent /* #16 */ BUILD_HANDLER msa msa sti silent /* #21 */ BUILD_HANDLER mdmx mdmx sti silent /* #22 */ #ifdef CONFIG_HARDWARE_WATCHPOINTS /* * For watch, interrupts will be enabled after the watch * registers are read. */ BUILD_HANDLER watch watch cli silent /* #23 */ #else BUILD_HANDLER watch watch sti verbose /* #23 */ #endif BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ BUILD_HANDLER mt mt sti silent /* #25 */ BUILD_HANDLER dsp dsp sti silent /* #26 */ BUILD_HANDLER reserved reserved sti verbose /* others */ .align 5 LEAF(handle_ri_rdhwr_tlbp) .set push .set noat .set noreorder /* check if TLB contains a entry for EPC */ MFC0 k1, CP0_ENTRYHI andi k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX MFC0 k0, CP0_EPC PTR_SRL k0, _PAGE_SHIFT + 1 PTR_SLL k0, _PAGE_SHIFT + 1 or k1, k0 MTC0 k1, CP0_ENTRYHI mtc0_tlbw_hazard tlbp tlb_probe_hazard mfc0 k1, CP0_INDEX .set pop bltz k1, handle_ri /* slow path */ /* fall thru */ END(handle_ri_rdhwr_tlbp) LEAF(handle_ri_rdhwr) .set push .set noat .set noreorder /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */ /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */ MFC0 k1, CP0_EPC #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2) and k0, k1, 1 beqz k0, 1f xor k1, k0 lhu k0, (k1) lhu k1, 2(k1) ins k1, k0, 16, 16 lui k0, 0x007d b docheck ori k0, 0x6b3c 1: lui k0, 0x7c03 lw k1, (k1) ori k0, 0xe83b #else andi k0, k1, 1 bnez k0, handle_ri lui k0, 0x7c03 lw k1, (k1) ori k0, 0xe83b #endif .set reorder docheck: bne k0, k1, handle_ri /* if not ours */ isrdhwr: /* The insn is rdhwr. No need to check CAUSE.BD here. 
 */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER	daddi_ov daddi_ov none silent	/* #12 */
#endif
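
To make the handler generation above concrete, this is a hand expansion of the
single line "BUILD_HANDLER bp bp sti silent", applying the __BUILD_HANDLER,
__build_clear_sti and __BUILD_silent macros exactly as defined above (an
illustrative sketch, not captured assembler output):

	.align	5
	NESTED(handle_bp, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_bp_int)		# \ext is _int via BUILD_HANDLER
	TRACE_IRQS_ON			# __build_clear_sti body
	STI
	.set	at
					# __BUILD_silent emits nothing
	move	a0, sp
	jal	do_bp			# C-level exception handler
	j	ret_from_exception
	END(handle_bp)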
aixcc-public/challenge-001-exemplar-source
12,194
arch/mips/kernel/cps-vec.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2013 Imagination Technologies * Author: Paul Burton <paul.burton@mips.com> */ #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> #include <asm/eva.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/pm.h> #define GCR_CPC_BASE_OFS 0x0088 #define GCR_CL_COHERENCE_OFS 0x2008 #define GCR_CL_ID_OFS 0x2028 #define CPC_CL_VC_STOP_OFS 0x2020 #define CPC_CL_VC_RUN_OFS 0x2028 .extern mips_cm_base .set noreorder #ifdef CONFIG_64BIT # define STATUS_BITDEPS ST0_KX #else # define STATUS_BITDEPS 0 #endif #ifdef CONFIG_MIPS_CPS_NS16550 #define DUMP_EXCEP(name) \ PTR_LA a0, 8f; \ jal mips_cps_bev_dump; \ nop; \ TEXT(name) #else /* !CONFIG_MIPS_CPS_NS16550 */ #define DUMP_EXCEP(name) #endif /* !CONFIG_MIPS_CPS_NS16550 */ /* * Set dest to non-zero if the core supports the MT ASE, else zero. If * MT is not supported then branch to nomt. */ .macro has_mt dest, nomt mfc0 \dest, CP0_CONFIG, 1 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 2 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 3 andi \dest, \dest, MIPS_CONF3_MT beqz \dest, \nomt nop .endm /* * Set dest to non-zero if the core supports MIPSr6 multithreading * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then * branch to nomt. */ .macro has_vp dest, nomt mfc0 \dest, CP0_CONFIG, 1 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 2 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 3 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 4 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 5 andi \dest, \dest, MIPS_CONF5_VP beqz \dest, \nomt nop .endm /* Calculate an uncached address for the CM GCRs */ .macro cmgcrb dest .set push .set noat MFC0 $1, CP0_CMGCRBASE PTR_SLL $1, $1, 4 PTR_LI \dest, UNCAC_BASE PTR_ADDU \dest, \dest, $1 .set pop .endm .balign 0x1000 LEAF(mips_cps_core_entry) /* * These first 4 bytes will be patched by cps_smp_setup to load the * CCA to use into register s0. */ .word 0 /* Check whether we're here due to an NMI */ mfc0 k0, CP0_STATUS and k0, k0, ST0_NMI beqz k0, not_nmi nop /* This is an NMI */ PTR_LA k0, nmi_handler jr k0 nop not_nmi: /* Setup Cause */ li t0, CAUSEF_IV mtc0 t0, CP0_CAUSE /* Setup Status */ li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS mtc0 t0, CP0_STATUS /* Skip cache & coherence setup if we're already coherent */ cmgcrb v1 lw s7, GCR_CL_COHERENCE_OFS(v1) bnez s7, 1f nop /* Initialize the L1 caches */ jal mips_cps_cache_init nop /* Enter the coherent domain */ li t0, 0xff sw t0, GCR_CL_COHERENCE_OFS(v1) ehb /* Set Kseg0 CCA to that in s0 */ 1: mfc0 t0, CP0_CONFIG ori t0, 0x7 xori t0, 0x7 or t0, t0, s0 mtc0 t0, CP0_CONFIG ehb /* Jump to kseg0 */ PTR_LA t0, 1f jr t0 nop /* * We're up, cached & coherent. Perform any EVA initialization necessary * before we access memory. */ 1: eva_init /* Retrieve boot configuration pointers */ jal mips_cps_get_bootcfg nop /* Skip core-level init if we started up coherent */ bnez s7, 1f nop /* Perform any further required core-level initialisation */ jal mips_cps_core_init nop /* * Boot any other VPEs within this core that should be online, and * deactivate this VPE if it should be offline. */ move a1, t9 jal mips_cps_boot_vpes move a0, v0 /* Off we go! */ 1: PTR_L t1, VPEBOOTCFG_PC(v1) PTR_L gp, VPEBOOTCFG_GP(v1) PTR_L sp, VPEBOOTCFG_SP(v1) jr t1 nop END(mips_cps_core_entry) .org 0x200 LEAF(excep_tlbfill) DUMP_EXCEP("TLB Fill") b . nop END(excep_tlbfill) .org 0x280 LEAF(excep_xtlbfill) DUMP_EXCEP("XTLB Fill") b . 
nop END(excep_xtlbfill) .org 0x300 LEAF(excep_cache) DUMP_EXCEP("Cache") b . nop END(excep_cache) .org 0x380 LEAF(excep_genex) DUMP_EXCEP("General") b . nop END(excep_genex) .org 0x400 LEAF(excep_intex) DUMP_EXCEP("Interrupt") b . nop END(excep_intex) .org 0x480 LEAF(excep_ejtag) PTR_LA k0, ejtag_debug_handler jr k0 nop END(excep_ejtag) LEAF(mips_cps_core_init) #ifdef CONFIG_MIPS_MT_SMP /* Check that the core implements the MT ASE */ has_mt t0, 3f .set push .set MIPS_ISA_LEVEL_RAW .set mt /* Only allow 1 TC per VPE to execute... */ dmt /* ...and for the moment only 1 VPE */ dvpe PTR_LA t1, 1f jr.hb t1 nop /* Enter VPE configuration state */ 1: mfc0 t0, CP0_MVPCONTROL ori t0, t0, MVPCONTROL_VPC mtc0 t0, CP0_MVPCONTROL /* Retrieve the number of VPEs within the core */ mfc0 t0, CP0_MVPCONF0 srl t0, t0, MVPCONF0_PVPE_SHIFT andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) addiu ta3, t0, 1 /* If there's only 1, we're done */ beqz t0, 2f nop /* Loop through each VPE within this core */ li ta1, 1 1: /* Operate on the appropriate TC */ mtc0 ta1, CP0_VPECONTROL ehb /* Bind TC to VPE (1:1 TC:VPE mapping) */ mttc0 ta1, CP0_TCBIND /* Set exclusive TC, non-active, master */ li t0, VPECONF0_MVP sll t1, ta1, VPECONF0_XTC_SHIFT or t0, t0, t1 mttc0 t0, CP0_VPECONF0 /* Set TC non-active, non-allocatable */ mttc0 zero, CP0_TCSTATUS /* Set TC halted */ li t0, TCHALT_H mttc0 t0, CP0_TCHALT /* Next VPE */ addiu ta1, ta1, 1 slt t0, ta1, ta3 bnez t0, 1b nop /* Leave VPE configuration state */ 2: mfc0 t0, CP0_MVPCONTROL xori t0, t0, MVPCONTROL_VPC mtc0 t0, CP0_MVPCONTROL 3: .set pop #endif jr ra nop END(mips_cps_core_init) /** * mips_cps_get_bootcfg() - retrieve boot configuration pointers * * Returns: pointer to struct core_boot_config in v0, pointer to * struct vpe_boot_config in v1, VPE ID in t9 */ LEAF(mips_cps_get_bootcfg) /* Calculate a pointer to this cores struct core_boot_config */ cmgcrb t0 lw t0, GCR_CL_ID_OFS(t0) li t1, COREBOOTCFG_SIZE mul t0, t0, t1 PTR_LA t1, mips_cps_core_bootcfg PTR_L t1, 0(t1) PTR_ADDU v0, t0, t1 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ li t9, 0 #if defined(CONFIG_CPU_MIPSR6) has_vp ta2, 1f /* * Assume non-contiguous numbering. Perhaps some day we'll need * to handle contiguous VP numbering, but no such systems yet * exist. 
*/ mfc0 t9, CP0_GLOBALNUMBER andi t9, t9, MIPS_GLOBALNUMBER_VP #elif defined(CONFIG_MIPS_MT_SMP) has_mt ta2, 1f /* Find the number of VPEs present in the core */ mfc0 t1, CP0_MVPCONF0 srl t1, t1, MVPCONF0_PVPE_SHIFT andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT addiu t1, t1, 1 /* Calculate a mask for the VPE ID from EBase.CPUNum */ clz t1, t1 li t2, 31 subu t1, t2, t1 li t2, 1 sll t1, t2, t1 addiu t1, t1, -1 /* Retrieve the VPE ID from EBase.CPUNum */ mfc0 t9, $15, 1 and t9, t9, t1 #endif 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ li t1, VPEBOOTCFG_SIZE mul v1, t9, t1 PTR_L ta3, COREBOOTCFG_VPECONFIG(v0) PTR_ADDU v1, v1, ta3 jr ra nop END(mips_cps_get_bootcfg) LEAF(mips_cps_boot_vpes) lw ta2, COREBOOTCFG_VPEMASK(a0) PTR_L ta3, COREBOOTCFG_VPECONFIG(a0) #if defined(CONFIG_CPU_MIPSR6) has_vp t0, 5f /* Find base address of CPC */ cmgcrb t3 PTR_L t1, GCR_CPC_BASE_OFS(t3) PTR_LI t2, ~0x7fff and t1, t1, t2 PTR_LI t2, UNCAC_BASE PTR_ADD t1, t1, t2 /* Start any other VPs that ought to be running */ PTR_S ta2, CPC_CL_VC_RUN_OFS(t1) /* Ensure this VP stops running if it shouldn't be */ not ta2 PTR_S ta2, CPC_CL_VC_STOP_OFS(t1) ehb #elif defined(CONFIG_MIPS_MT) /* If the core doesn't support MT then return */ has_mt t0, 5f /* Enter VPE configuration state */ .set push .set MIPS_ISA_LEVEL_RAW .set mt dvpe .set pop PTR_LA t1, 1f jr.hb t1 nop 1: mfc0 t1, CP0_MVPCONTROL ori t1, t1, MVPCONTROL_VPC mtc0 t1, CP0_MVPCONTROL ehb /* Loop through each VPE */ move t8, ta2 li ta1, 0 /* Check whether the VPE should be running. If not, skip it */ 1: andi t0, ta2, 1 beqz t0, 2f nop /* Operate on the appropriate TC */ mfc0 t0, CP0_VPECONTROL ori t0, t0, VPECONTROL_TARGTC xori t0, t0, VPECONTROL_TARGTC or t0, t0, ta1 mtc0 t0, CP0_VPECONTROL ehb .set push .set MIPS_ISA_LEVEL_RAW .set mt /* Skip the VPE if its TC is not halted */ mftc0 t0, CP0_TCHALT beqz t0, 2f nop /* Calculate a pointer to the VPEs struct vpe_boot_config */ li t0, VPEBOOTCFG_SIZE mul t0, t0, ta1 addu t0, t0, ta3 /* Set the TC restart PC */ lw t1, VPEBOOTCFG_PC(t0) mttc0 t1, CP0_TCRESTART /* Set the TC stack pointer */ lw t1, VPEBOOTCFG_SP(t0) mttgpr t1, sp /* Set the TC global pointer */ lw t1, VPEBOOTCFG_GP(t0) mttgpr t1, gp /* Copy config from this VPE */ mfc0 t0, CP0_CONFIG mttc0 t0, CP0_CONFIG /* * Copy the EVA config from this VPE if the CPU supports it. * CONFIG3 must exist to be running MT startup - just read it. 
*/ mfc0 t0, CP0_CONFIG, 3 and t0, t0, MIPS_CONF3_SC beqz t0, 3f nop mfc0 t0, CP0_SEGCTL0 mttc0 t0, CP0_SEGCTL0 mfc0 t0, CP0_SEGCTL1 mttc0 t0, CP0_SEGCTL1 mfc0 t0, CP0_SEGCTL2 mttc0 t0, CP0_SEGCTL2 3: /* Ensure no software interrupts are pending */ mttc0 zero, CP0_CAUSE mttc0 zero, CP0_STATUS /* Set TC active, not interrupt exempt */ mftc0 t0, CP0_TCSTATUS li t1, ~TCSTATUS_IXMT and t0, t0, t1 ori t0, t0, TCSTATUS_A mttc0 t0, CP0_TCSTATUS /* Clear the TC halt bit */ mttc0 zero, CP0_TCHALT /* Set VPE active */ mftc0 t0, CP0_VPECONF0 ori t0, t0, VPECONF0_VPA mttc0 t0, CP0_VPECONF0 /* Next VPE */ 2: srl ta2, ta2, 1 addiu ta1, ta1, 1 bnez ta2, 1b nop /* Leave VPE configuration state */ mfc0 t1, CP0_MVPCONTROL xori t1, t1, MVPCONTROL_VPC mtc0 t1, CP0_MVPCONTROL ehb evpe .set pop /* Check whether this VPE is meant to be running */ li t0, 1 sll t0, t0, a1 and t0, t0, t8 bnez t0, 2f nop /* This VPE should be offline, halt the TC */ li t0, TCHALT_H mtc0 t0, CP0_TCHALT PTR_LA t0, 1f 1: jr.hb t0 nop 2: #endif /* CONFIG_MIPS_MT_SMP */ /* Return */ 5: jr ra nop END(mips_cps_boot_vpes) LEAF(mips_cps_cache_init) /* * Clear the bits used to index the caches. Note that the architecture * dictates that writing to any of TagLo or TagHi selects 0 or 2 should * be valid for all MIPS32 CPUs, even those for which said writes are * unnecessary. */ mtc0 zero, CP0_TAGLO, 0 mtc0 zero, CP0_TAGHI, 0 mtc0 zero, CP0_TAGLO, 2 mtc0 zero, CP0_TAGHI, 2 ehb /* Primary cache configuration is indicated by Config1 */ mfc0 v0, CP0_CONFIG, 1 /* Detect I-cache line size */ _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ beqz t0, icache_done li t1, 2 sllv t0, t1, t0 /* Detect I-cache size */ _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ xori t2, t1, 0x7 beqz t2, 1f li t3, 32 addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == I-cache sets per way */ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 li a0, CKSEG0 PTR_ADD a1, a0, t1 1: cache Index_Store_Tag_I, 0(a0) PTR_ADD a0, a0, t0 bne a0, a1, 1b nop icache_done: /* Detect D-cache line size */ _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ beqz t0, dcache_done li t1, 2 sllv t0, t1, t0 /* Detect D-cache size */ _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ xori t2, t1, 0x7 beqz t2, 1f li t3, 32 addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == D-cache sets per way */ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 li a0, CKSEG0 PTR_ADDU a1, a0, t1 PTR_SUBU a1, a1, t0 1: cache Index_Store_Tag_D, 0(a0) bne a0, a1, 1b PTR_ADD a0, a0, t0 dcache_done: jr ra nop END(mips_cps_cache_init) #if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) /* Calculate a pointer to this CPUs struct mips_static_suspend_state */ .macro psstate dest .set push .set noat lw $1, TI_CPU(gp) sll $1, $1, LONGLOG PTR_LA \dest, __per_cpu_offset addu $1, $1, \dest lw $1, 0($1) PTR_LA \dest, cps_cpu_state addu \dest, \dest, $1 .set pop .endm LEAF(mips_cps_pm_save) /* Save CPU state */ SUSPEND_SAVE_REGS psstate t1 SUSPEND_SAVE_STATIC jr v0 nop END(mips_cps_pm_save) LEAF(mips_cps_pm_restore) /* Restore CPU state */ psstate t1 RESUME_RESTORE_STATIC RESUME_RESTORE_REGS_RETURN END(mips_cps_pm_restore) #endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
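
The I-cache sizing in mips_cps_cache_init above decodes the architected
Config1 fields with shifts; a worked example makes the loop bound concrete.
The field values below are assumed purely for illustration, and the decode
shown matches what the code computes for the non-special IS encodings:

	# Assume Config1.IL = 4, Config1.IS = 2, Config1.IA = 3:
	#   t0 (line size)     = 2 << 4        = 32 bytes
	#   t1 (sets per way)  = 32 << (2 + 1) = 256
	#   t2 (associativity) = 3 + 1         = 4 ways
	#   t1 (loop bound)    = 256 * 32 * 4  = 32768
	# so the Index_Store_Tag_I loop walks CKSEG0 .. CKSEG0 + 32 KiB
	# in one-line (32-byte) steps.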
aixcc-public/challenge-001-exemplar-source
5,216
arch/mips/kernel/scall32-o32.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org> * Copyright (C) 2001 MIPS Technologies, Inc. * Copyright (C) 2004 Thiemo Seufer * Copyright (C) 2014 Imagination Technologies Ltd. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/irqflags.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/isadep.h> #include <asm/sysmips.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/asm-offsets.h> .align 5 NESTED(handle_sys, PT_SIZE, sp) .set noat SAVE_SOME TRACE_IRQS_ON_RELOAD STI .set at lw t1, PT_EPC(sp) # skip syscall on return addiu t1, 4 # skip to next instruction sw t1, PT_EPC(sp) sw a3, PT_R26(sp) # save a3 for syscall restarting /* * More than four arguments. Try to deal with it by copying the * stack arguments from the user stack to the kernel stack. * This Sucks (TM). */ lw t0, PT_R29(sp) # get old user stack pointer /* * We intentionally keep the kernel stack a little below the top of * userspace so we don't have to do a slower byte accurate check here. */ addu t4, t0, 32 bltz t4, bad_stack # -> sp is bad /* * Ok, copy the args from the luser stack to the kernel stack. */ .set push .set noreorder .set nomacro load_a4: user_lw(t5, 16(t0)) # argument #5 from usp load_a5: user_lw(t6, 20(t0)) # argument #6 from usp load_a6: user_lw(t7, 24(t0)) # argument #7 from usp load_a7: user_lw(t8, 28(t0)) # argument #8 from usp loads_done: sw t5, 16(sp) # argument #5 to ksp sw t6, 20(sp) # argument #6 to ksp sw t7, 24(sp) # argument #7 to ksp sw t8, 28(sp) # argument #8 to ksp .set pop .section __ex_table,"a" PTR_WD load_a4, bad_stack_a4 PTR_WD load_a5, bad_stack_a5 PTR_WD load_a6, bad_stack_a6 PTR_WD load_a7, bad_stack_a7 .previous lw t0, TI_FLAGS($28) # syscall tracing enabled? li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 bnez t0, syscall_trace_entry # -> yes syscall_common: subu v0, v0, __NR_O32_Linux # check syscall number sltiu t0, v0, __NR_O32_Linux_syscalls beqz t0, illegal_syscall sll t0, v0, 2 la t1, sys_call_table addu t1, t0 lw t2, (t1) # syscall routine beqz t2, illegal_syscall jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? sltu t0, t0, v0 sw t0, PT_R7(sp) # set error flag beqz t0, 1f lw t1, PT_R2(sp) # syscall number negu v0 # error sw t1, PT_R0(sp) # save it for syscall restarting 1: sw v0, PT_R2(sp) # result o32_syscall_exit: j syscall_exit_partial /* ------------------------------------------------------------------------ */ syscall_trace_entry: SAVE_STATIC move a0, sp /* * syscall number is in v0 unless we called syscall(__NR_###) * where the real syscall number is in a0 */ move a1, v0 subu t2, v0, __NR_O32_Linux bnez t2, 1f /* __NR_syscall at offset 0 */ lw a1, PT_R4(sp) 1: jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall RESTORE_STATIC lw v0, PT_R2(sp) # Restore syscall (maybe modified) lw a0, PT_R4(sp) # Restore argument registers lw a1, PT_R5(sp) lw a2, PT_R6(sp) lw a3, PT_R7(sp) j syscall_common 1: j syscall_exit /* ------------------------------------------------------------------------ */ /* * Our open-coded access area sanity test for the stack pointer * failed. We probably should handle this case a bit more drastic. 
*/ bad_stack: li v0, EFAULT sw v0, PT_R2(sp) li t0, 1 # set error flag sw t0, PT_R7(sp) j o32_syscall_exit bad_stack_a4: li t5, 0 b load_a5 bad_stack_a5: li t6, 0 b load_a6 bad_stack_a6: li t7, 0 b load_a7 bad_stack_a7: li t8, 0 b loads_done /* * The system call does not exist in this kernel */ illegal_syscall: li v0, ENOSYS # error sw v0, PT_R2(sp) li t0, 1 # set error flag sw t0, PT_R7(sp) j o32_syscall_exit END(handle_sys) LEAF(sys_syscall) subu t0, a0, __NR_O32_Linux # check syscall number sltiu v0, t0, __NR_O32_Linux_syscalls beqz t0, einval # do not recurse sll t1, t0, 2 beqz v0, einval lw t2, sys_call_table(t1) # syscall routine move a0, a1 # shift argument registers move a1, a2 move a2, a3 lw a3, 16(sp) lw t4, 20(sp) lw t5, 24(sp) lw t6, 28(sp) sw t4, 16(sp) sw t5, 20(sp) sw t6, 24(sp) jr t2 /* Unreached */ einval: li v0, -ENOSYS jr ra END(sys_syscall) #ifdef CONFIG_MIPS_MT_FPAFF /* * For FPU affinity scheduling on MIPS MT processors, we need to * intercept sys_sched_xxxaffinity() calls until we get a proper hook * in kernel/sched/core.c. Considered only temporary we only support * these hooks for the 32-bit kernel - there is no MIPS64 MT processor * atm. */ #define sys_sched_setaffinity mipsmt_sys_sched_setaffinity #define sys_sched_getaffinity mipsmt_sys_sched_getaffinity #endif /* CONFIG_MIPS_MT_FPAFF */ #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) #define __SYSCALL(nr, entry) PTR_WD entry .align 2 .type sys_call_table, @object EXPORT(sys_call_table) #include <asm/syscall_table_o32.h>
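
For orientation, a minimal user-side sketch of the o32 convention that
handle_sys above services: syscall number in v0, the first four arguments in
a0-a3, later arguments at 16(sp) and up, with a3 returned as the error flag
and v0 as the result or errno. The fd/buffer/count values and the msg label
are placeholders, and 4004 assumes the usual o32 numbering (base 4000,
write = 4):

	li	v0, 4004		# __NR_write in the o32 ABI (assumed)
	li	a0, 1			# fd: stdout (placeholder)
	la	a1, msg			# buf: placeholder label
	li	a2, 5			# count (placeholder)
	syscall
	bnez	a3, 1f			# a3 != 0 -> failure, v0 holds errno
	# success: v0 = number of bytes written
1: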
aixcc-public/challenge-001-exemplar-source
10,172
arch/mips/crypto/chacha-core.S
/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /* * Copyright (C) 2016-2018 René van Dorst <opensource@vdorst.com>. All Rights Reserved. * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. */ #define MASK_U32 0x3c #define CHACHA20_BLOCK_SIZE 64 #define STACK_SIZE 32 #define X0 $t0 #define X1 $t1 #define X2 $t2 #define X3 $t3 #define X4 $t4 #define X5 $t5 #define X6 $t6 #define X7 $t7 #define X8 $t8 #define X9 $t9 #define X10 $v1 #define X11 $s6 #define X12 $s5 #define X13 $s4 #define X14 $s3 #define X15 $s2 /* Use regs which are overwritten on exit for Tx so we don't leak clear data. */ #define T0 $s1 #define T1 $s0 #define T(n) T ## n #define X(n) X ## n /* Input arguments */ #define STATE $a0 #define OUT $a1 #define IN $a2 #define BYTES $a3 /* Output argument */ /* NONCE[0] is kept in a register and not in memory. * We don't want to touch original value in memory. * Must be incremented every loop iteration. */ #define NONCE_0 $v0 /* SAVED_X and SAVED_CA are set in the jump table. * Use regs which are overwritten on exit else we don't leak clear data. * They are used to handling the last bytes which are not multiple of 4. */ #define SAVED_X X15 #define SAVED_CA $s7 #define IS_UNALIGNED $s7 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #define MSB 0 #define LSB 3 #define ROTx rotl #define ROTR(n) rotr n, 24 #define CPU_TO_LE32(n) \ wsbh n; \ rotr n, 16; #else #define MSB 3 #define LSB 0 #define ROTx rotr #define CPU_TO_LE32(n) #define ROTR(n) #endif #define FOR_EACH_WORD(x) \ x( 0); \ x( 1); \ x( 2); \ x( 3); \ x( 4); \ x( 5); \ x( 6); \ x( 7); \ x( 8); \ x( 9); \ x(10); \ x(11); \ x(12); \ x(13); \ x(14); \ x(15); #define FOR_EACH_WORD_REV(x) \ x(15); \ x(14); \ x(13); \ x(12); \ x(11); \ x(10); \ x( 9); \ x( 8); \ x( 7); \ x( 6); \ x( 5); \ x( 4); \ x( 3); \ x( 2); \ x( 1); \ x( 0); #define PLUS_ONE_0 1 #define PLUS_ONE_1 2 #define PLUS_ONE_2 3 #define PLUS_ONE_3 4 #define PLUS_ONE_4 5 #define PLUS_ONE_5 6 #define PLUS_ONE_6 7 #define PLUS_ONE_7 8 #define PLUS_ONE_8 9 #define PLUS_ONE_9 10 #define PLUS_ONE_10 11 #define PLUS_ONE_11 12 #define PLUS_ONE_12 13 #define PLUS_ONE_13 14 #define PLUS_ONE_14 15 #define PLUS_ONE_15 16 #define PLUS_ONE(x) PLUS_ONE_ ## x #define _CONCAT3(a,b,c) a ## b ## c #define CONCAT3(a,b,c) _CONCAT3(a,b,c) #define STORE_UNALIGNED(x) \ CONCAT3(.Lchacha_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \ .if (x != 12); \ lw T0, (x*4)(STATE); \ .endif; \ lwl T1, (x*4)+MSB ## (IN); \ lwr T1, (x*4)+LSB ## (IN); \ .if (x == 12); \ addu X ## x, NONCE_0; \ .else; \ addu X ## x, T0; \ .endif; \ CPU_TO_LE32(X ## x); \ xor X ## x, T1; \ swl X ## x, (x*4)+MSB ## (OUT); \ swr X ## x, (x*4)+LSB ## (OUT); #define STORE_ALIGNED(x) \ CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \ .if (x != 12); \ lw T0, (x*4)(STATE); \ .endif; \ lw T1, (x*4) ## (IN); \ .if (x == 12); \ addu X ## x, NONCE_0; \ .else; \ addu X ## x, T0; \ .endif; \ CPU_TO_LE32(X ## x); \ xor X ## x, T1; \ sw X ## x, (x*4) ## (OUT); /* Jump table macro. * Used for setup and handling the last bytes, which are not multiple of 4. * X15 is free to store Xn * Every jumptable entry must be equal in size. 
*/ #define JMPTBL_ALIGNED(x) \ .Lchacha_mips_jmptbl_aligned_ ## x: ; \ .set noreorder; \ b .Lchacha_mips_xor_aligned_ ## x ## _b; \ .if (x == 12); \ addu SAVED_X, X ## x, NONCE_0; \ .else; \ addu SAVED_X, X ## x, SAVED_CA; \ .endif; \ .set reorder #define JMPTBL_UNALIGNED(x) \ .Lchacha_mips_jmptbl_unaligned_ ## x: ; \ .set noreorder; \ b .Lchacha_mips_xor_unaligned_ ## x ## _b; \ .if (x == 12); \ addu SAVED_X, X ## x, NONCE_0; \ .else; \ addu SAVED_X, X ## x, SAVED_CA; \ .endif; \ .set reorder #define AXR(A, B, C, D, K, L, M, N, V, W, Y, Z, S) \ addu X(A), X(K); \ addu X(B), X(L); \ addu X(C), X(M); \ addu X(D), X(N); \ xor X(V), X(A); \ xor X(W), X(B); \ xor X(Y), X(C); \ xor X(Z), X(D); \ rotl X(V), S; \ rotl X(W), S; \ rotl X(Y), S; \ rotl X(Z), S; .text .set reorder .set noat .globl chacha_crypt_arch .ent chacha_crypt_arch chacha_crypt_arch: .frame $sp, STACK_SIZE, $ra /* Load number of rounds */ lw $at, 16($sp) addiu $sp, -STACK_SIZE /* Return bytes = 0. */ beqz BYTES, .Lchacha_mips_end lw NONCE_0, 48(STATE) /* Save s0-s7 */ sw $s0, 0($sp) sw $s1, 4($sp) sw $s2, 8($sp) sw $s3, 12($sp) sw $s4, 16($sp) sw $s5, 20($sp) sw $s6, 24($sp) sw $s7, 28($sp) /* Test IN or OUT is unaligned. * IS_UNALIGNED = ( IN | OUT ) & 0x00000003 */ or IS_UNALIGNED, IN, OUT andi IS_UNALIGNED, 0x3 b .Lchacha_rounds_start .align 4 .Loop_chacha_rounds: addiu IN, CHACHA20_BLOCK_SIZE addiu OUT, CHACHA20_BLOCK_SIZE addiu NONCE_0, 1 .Lchacha_rounds_start: lw X0, 0(STATE) lw X1, 4(STATE) lw X2, 8(STATE) lw X3, 12(STATE) lw X4, 16(STATE) lw X5, 20(STATE) lw X6, 24(STATE) lw X7, 28(STATE) lw X8, 32(STATE) lw X9, 36(STATE) lw X10, 40(STATE) lw X11, 44(STATE) move X12, NONCE_0 lw X13, 52(STATE) lw X14, 56(STATE) lw X15, 60(STATE) .Loop_chacha_xor_rounds: addiu $at, -2 AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16); AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12); AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8); AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7); AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16); AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12); AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8); AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7); bnez $at, .Loop_chacha_xor_rounds addiu BYTES, -(CHACHA20_BLOCK_SIZE) /* Is data src/dst unaligned? Jump */ bnez IS_UNALIGNED, .Loop_chacha_unaligned /* Set number rounds here to fill delayslot. */ lw $at, (STACK_SIZE+16)($sp) /* BYTES < 0, it has no full block. */ bltz BYTES, .Lchacha_mips_no_full_block_aligned FOR_EACH_WORD_REV(STORE_ALIGNED) /* BYTES > 0? Loop again. */ bgtz BYTES, .Loop_chacha_rounds /* Place this here to fill delay slot */ addiu NONCE_0, 1 /* BYTES < 0? 
Handle last bytes */ bltz BYTES, .Lchacha_mips_xor_bytes .Lchacha_mips_xor_done: /* Restore used registers */ lw $s0, 0($sp) lw $s1, 4($sp) lw $s2, 8($sp) lw $s3, 12($sp) lw $s4, 16($sp) lw $s5, 20($sp) lw $s6, 24($sp) lw $s7, 28($sp) /* Write NONCE_0 back to right location in state */ sw NONCE_0, 48(STATE) .Lchacha_mips_end: addiu $sp, STACK_SIZE jr $ra .Lchacha_mips_no_full_block_aligned: /* Restore the offset on BYTES */ addiu BYTES, CHACHA20_BLOCK_SIZE /* Get number of full WORDS */ andi $at, BYTES, MASK_U32 /* Load upper half of jump table addr */ lui T0, %hi(.Lchacha_mips_jmptbl_aligned_0) /* Calculate lower half jump table offset */ ins T0, $at, 1, 6 /* Add offset to STATE */ addu T1, STATE, $at /* Add lower half jump table addr */ addiu T0, %lo(.Lchacha_mips_jmptbl_aligned_0) /* Read value from STATE */ lw SAVED_CA, 0(T1) /* Store remaining bytecounter as negative value */ subu BYTES, $at, BYTES jr T0 /* Jump table */ FOR_EACH_WORD(JMPTBL_ALIGNED) .Loop_chacha_unaligned: /* Set number rounds here to fill delayslot. */ lw $at, (STACK_SIZE+16)($sp) /* BYTES > 0, it has no full block. */ bltz BYTES, .Lchacha_mips_no_full_block_unaligned FOR_EACH_WORD_REV(STORE_UNALIGNED) /* BYTES > 0? Loop again. */ bgtz BYTES, .Loop_chacha_rounds /* Write NONCE_0 back to right location in state */ sw NONCE_0, 48(STATE) .set noreorder /* Fall through to byte handling */ bgez BYTES, .Lchacha_mips_xor_done .Lchacha_mips_xor_unaligned_0_b: .Lchacha_mips_xor_aligned_0_b: /* Place this here to fill delay slot */ addiu NONCE_0, 1 .set reorder .Lchacha_mips_xor_bytes: addu IN, $at addu OUT, $at /* First byte */ lbu T1, 0(IN) addiu $at, BYTES, 1 CPU_TO_LE32(SAVED_X) ROTR(SAVED_X) xor T1, SAVED_X sb T1, 0(OUT) beqz $at, .Lchacha_mips_xor_done /* Second byte */ lbu T1, 1(IN) addiu $at, BYTES, 2 ROTx SAVED_X, 8 xor T1, SAVED_X sb T1, 1(OUT) beqz $at, .Lchacha_mips_xor_done /* Third byte */ lbu T1, 2(IN) ROTx SAVED_X, 8 xor T1, SAVED_X sb T1, 2(OUT) b .Lchacha_mips_xor_done .Lchacha_mips_no_full_block_unaligned: /* Restore the offset on BYTES */ addiu BYTES, CHACHA20_BLOCK_SIZE /* Get number of full WORDS */ andi $at, BYTES, MASK_U32 /* Load upper half of jump table addr */ lui T0, %hi(.Lchacha_mips_jmptbl_unaligned_0) /* Calculate lower half jump table offset */ ins T0, $at, 1, 6 /* Add offset to STATE */ addu T1, STATE, $at /* Add lower half jump table addr */ addiu T0, %lo(.Lchacha_mips_jmptbl_unaligned_0) /* Read value from STATE */ lw SAVED_CA, 0(T1) /* Store remaining bytecounter as negative value */ subu BYTES, $at, BYTES jr T0 /* Jump table */ FOR_EACH_WORD(JMPTBL_UNALIGNED) .end chacha_crypt_arch .set at /* Input arguments * STATE $a0 * OUT $a1 * NROUND $a2 */ #undef X12 #undef X13 #undef X14 #undef X15 #define X12 $a3 #define X13 $at #define X14 $v0 #define X15 STATE .set noat .globl hchacha_block_arch .ent hchacha_block_arch hchacha_block_arch: .frame $sp, STACK_SIZE, $ra addiu $sp, -STACK_SIZE /* Save X11(s6) */ sw X11, 0($sp) lw X0, 0(STATE) lw X1, 4(STATE) lw X2, 8(STATE) lw X3, 12(STATE) lw X4, 16(STATE) lw X5, 20(STATE) lw X6, 24(STATE) lw X7, 28(STATE) lw X8, 32(STATE) lw X9, 36(STATE) lw X10, 40(STATE) lw X11, 44(STATE) lw X12, 48(STATE) lw X13, 52(STATE) lw X14, 56(STATE) lw X15, 60(STATE) .Loop_hchacha_xor_rounds: addiu $a2, -2 AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16); AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12); AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8); AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7); AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16); AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 
							7,  4, 12);
	AXR( 0, 1, 2, 3,  5, 6, 7, 4, 15,12,13,14,  8);
	AXR(10,11, 8, 9, 15,12,13,14,  5, 6, 7, 4,  7);
	bnez	$a2, .Loop_hchacha_xor_rounds

	/* Restore used register */
	lw	X11, 0($sp)

	sw	X0, 0(OUT)
	sw	X1, 4(OUT)
	sw	X2, 8(OUT)
	sw	X3, 12(OUT)
	sw	X12, 16(OUT)
	sw	X13, 20(OUT)
	sw	X14, 24(OUT)
	sw	X15, 28(OUT)

	addiu	$sp, STACK_SIZE
	jr	$ra
.end hchacha_block_arch
.set at
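
The round loop above is easier to read with one AXR line expanded by hand.
Per the AXR macro definition earlier in this file, its first line is the
ChaCha column step a += b; d ^= a; d <<<= 16 applied to all four columns at
once (the X names are kept, so X0 etc. still stand for the #defined
registers):

	# AXR( 0, 1, 2, 3,  4, 5, 6, 7, 12,13,14,15, 16) expands to:
	addu	X0, X4			# a += b, columns 0..3
	addu	X1, X5
	addu	X2, X6
	addu	X3, X7
	xor	X12, X0			# d ^= a
	xor	X13, X1
	xor	X14, X2
	xor	X15, X3
	rotl	X12, 16			# d <<<= 16
	rotl	X13, 16
	rotl	X14, 16
	rotl	X15, 16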
aixcc-public/challenge-001-exemplar-source
12,617
arch/mips/cavium-octeon/octeon-memcpy.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Unified implementation of memcpy, memmove and the __copy_user backend. * * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc. * Copyright (C) 2002 Broadcom, Inc. * memcpy/copy_user author: Mark Vandevoorde * * Mnemonic names for arguments to memcpy/__copy_user */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #define dst a0 #define src a1 #define len a2 /* * Spec * * memcpy copies len bytes from src to dst and sets v0 to dst. * It assumes that * - src and dst don't overlap * - src is readable * - dst is writable * memcpy uses the standard calling convention * * __copy_user copies up to len bytes from src to dst and sets a2 (len) to * the number of uncopied bytes due to an exception caused by a read or write. * __copy_user assumes that src and dst don't overlap, and that the call is * implementing one of the following: * copy_to_user * - src is readable (no exceptions when reading src) * copy_from_user * - dst is writable (no exceptions when writing dst) * __copy_user uses a non-standard calling convention; see * arch/mips/include/asm/uaccess.h * * When an exception happens on a load, the handler must # ensure that all of the destination buffer is overwritten to prevent * leaking information to user mode programs. */ /* * Implementation */ /* * The exception handler for loads requires that: * 1- AT contain the address of the byte just past the end of the source * of the copy, * 2- src_entry <= src < AT, and * 3- (dst - src) == (dst_entry - src_entry), * The _entry suffix denotes values when __copy_user was called. * * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user * (2) is met by incrementing src by the number of bytes copied * (3) is met by not doing loads between a pair of increments of dst and src * * The exception handlers for stores adjust len (if necessary) and return. * These handlers do not need to overwrite any data. * * For __rmemcpy and memmove an exception is always a kernel bug, therefore * they're not protected. */ #define EXC(inst_reg,addr,handler) \ 9: inst_reg, addr; \ .section __ex_table,"a"; \ PTR_WD 9b, handler; \ .previous /* * Only on the 64-bit kernel we can made use of 64-bit registers. */ #define LOAD ld #define LOADL ldl #define LOADR ldr #define STOREL sdl #define STORER sdr #define STORE sd #define ADD daddu #define SUB dsubu #define SRL dsrl #define SRA dsra #define SLL dsll #define SLLV dsllv #define SRLV dsrlv #define NBYTES 8 #define LOG_NBYTES 3 /* * As we are sharing code base with the mips32 tree (which use the o32 ABI * register definitions). We need to redefine the register definitions from * the n64 ABI register naming to the o32 ABI register naming. 
*/ #undef t0 #undef t1 #undef t2 #undef t3 #define t0 $8 #define t1 $9 #define t2 $10 #define t3 $11 #define t4 $12 #define t5 $13 #define t6 $14 #define t7 $15 #ifdef CONFIG_CPU_LITTLE_ENDIAN #define LDFIRST LOADR #define LDREST LOADL #define STFIRST STORER #define STREST STOREL #define SHIFT_DISCARD SLLV #else #define LDFIRST LOADL #define LDREST LOADR #define STFIRST STOREL #define STREST STORER #define SHIFT_DISCARD SRLV #endif #define FIRST(unit) ((unit)*NBYTES) #define REST(unit) (FIRST(unit)+NBYTES-1) #define UNIT(unit) FIRST(unit) #define ADDRMASK (NBYTES-1) .text .set noreorder .set noat /* * A combined memcpy/__copy_user * __copy_user sets len to 0 for success; else to an upper bound of * the number of uncopied bytes. * memcpy sets v0 to dst. */ .align 5 LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ __memcpy: FEXPORT(__raw_copy_from_user) EXPORT_SYMBOL(__raw_copy_from_user) FEXPORT(__raw_copy_to_user) EXPORT_SYMBOL(__raw_copy_to_user) /* * Note: dst & src may be unaligned, len may be 0 * Temps */ # # Octeon doesn't care if the destination is unaligned. The hardware # can fix it faster than we can special case the assembly. # pref 0, 0(src) sltu t0, len, NBYTES # Check if < 1 word bnez t0, copy_bytes_checklen and t0, src, ADDRMASK # Check if src unaligned bnez t0, src_unaligned sltu t0, len, 4*NBYTES # Check if < 4 words bnez t0, less_than_4units sltu t0, len, 8*NBYTES # Check if < 8 words bnez t0, less_than_8units sltu t0, len, 16*NBYTES # Check if < 16 words bnez t0, cleanup_both_aligned sltu t0, len, 128+1 # Check if len < 129 bnez t0, 1f # Skip prefetch if len is too short sltu t0, len, 256+1 # Check if len < 257 bnez t0, 1f # Skip prefetch if len is too short pref 0, 128(src) # We must not prefetch invalid addresses # # This is where we loop if there is more than 128 bytes left 2: pref 0, 256(src) # We must not prefetch invalid addresses # # This is where we loop if we can't prefetch anymore 1: EXC( LOAD t0, UNIT(0)(src), l_exc) EXC( LOAD t1, UNIT(1)(src), l_exc_copy) EXC( LOAD t2, UNIT(2)(src), l_exc_copy) EXC( LOAD t3, UNIT(3)(src), l_exc_copy) SUB len, len, 16*NBYTES EXC( STORE t0, UNIT(0)(dst), s_exc_p16u) EXC( STORE t1, UNIT(1)(dst), s_exc_p15u) EXC( STORE t2, UNIT(2)(dst), s_exc_p14u) EXC( STORE t3, UNIT(3)(dst), s_exc_p13u) EXC( LOAD t0, UNIT(4)(src), l_exc_copy) EXC( LOAD t1, UNIT(5)(src), l_exc_copy) EXC( LOAD t2, UNIT(6)(src), l_exc_copy) EXC( LOAD t3, UNIT(7)(src), l_exc_copy) EXC( STORE t0, UNIT(4)(dst), s_exc_p12u) EXC( STORE t1, UNIT(5)(dst), s_exc_p11u) EXC( STORE t2, UNIT(6)(dst), s_exc_p10u) ADD src, src, 16*NBYTES EXC( STORE t3, UNIT(7)(dst), s_exc_p9u) ADD dst, dst, 16*NBYTES EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16) EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16) EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16) EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16) EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u) EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u) EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u) EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u) EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16) EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16) EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16) EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16) EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u) EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u) EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u) EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u) sltu t0, len, 256+1 # See if we can prefetch more beqz t0, 2b sltu t0, len, 128 # See if we can loop more time beqz t0, 1b 
nop # # Jump here if there are less than 16*NBYTES left. # cleanup_both_aligned: beqz len, done sltu t0, len, 8*NBYTES bnez t0, less_than_8units nop EXC( LOAD t0, UNIT(0)(src), l_exc) EXC( LOAD t1, UNIT(1)(src), l_exc_copy) EXC( LOAD t2, UNIT(2)(src), l_exc_copy) EXC( LOAD t3, UNIT(3)(src), l_exc_copy) SUB len, len, 8*NBYTES EXC( STORE t0, UNIT(0)(dst), s_exc_p8u) EXC( STORE t1, UNIT(1)(dst), s_exc_p7u) EXC( STORE t2, UNIT(2)(dst), s_exc_p6u) EXC( STORE t3, UNIT(3)(dst), s_exc_p5u) EXC( LOAD t0, UNIT(4)(src), l_exc_copy) EXC( LOAD t1, UNIT(5)(src), l_exc_copy) EXC( LOAD t2, UNIT(6)(src), l_exc_copy) EXC( LOAD t3, UNIT(7)(src), l_exc_copy) EXC( STORE t0, UNIT(4)(dst), s_exc_p4u) EXC( STORE t1, UNIT(5)(dst), s_exc_p3u) EXC( STORE t2, UNIT(6)(dst), s_exc_p2u) EXC( STORE t3, UNIT(7)(dst), s_exc_p1u) ADD src, src, 8*NBYTES beqz len, done ADD dst, dst, 8*NBYTES # # Jump here if there are less than 8*NBYTES left. # less_than_8units: sltu t0, len, 4*NBYTES bnez t0, less_than_4units nop EXC( LOAD t0, UNIT(0)(src), l_exc) EXC( LOAD t1, UNIT(1)(src), l_exc_copy) EXC( LOAD t2, UNIT(2)(src), l_exc_copy) EXC( LOAD t3, UNIT(3)(src), l_exc_copy) SUB len, len, 4*NBYTES EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) ADD src, src, 4*NBYTES beqz len, done ADD dst, dst, 4*NBYTES # # Jump here if there are less than 4*NBYTES left. This means # we may need to copy up to 3 NBYTES words. # less_than_4units: sltu t0, len, 1*NBYTES bnez t0, copy_bytes_checklen nop # # 1) Copy NBYTES, then check length again # EXC( LOAD t0, 0(src), l_exc) SUB len, len, NBYTES sltu t1, len, 8 EXC( STORE t0, 0(dst), s_exc_p1u) ADD src, src, NBYTES bnez t1, copy_bytes_checklen ADD dst, dst, NBYTES # # 2) Copy NBYTES, then check length again # EXC( LOAD t0, 0(src), l_exc) SUB len, len, NBYTES sltu t1, len, 8 EXC( STORE t0, 0(dst), s_exc_p1u) ADD src, src, NBYTES bnez t1, copy_bytes_checklen ADD dst, dst, NBYTES # # 3) Copy NBYTES, then check length again # EXC( LOAD t0, 0(src), l_exc) SUB len, len, NBYTES ADD src, src, NBYTES ADD dst, dst, NBYTES b copy_bytes_checklen EXC( STORE t0, -8(dst), s_exc_p1u) src_unaligned: #define rem t8 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter beqz t0, cleanup_src_unaligned and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 1: /* * Avoid consecutive LD*'s to the same register since some mips * implementations can't issue them in the same cycle. * It's OK to load FIRST(N+1) before REST(N) because the two addresses * are to the same unit (unless src is aligned, but it's not). 
*/ EXC( LDFIRST t0, FIRST(0)(src), l_exc) EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy) SUB len, len, 4*NBYTES EXC( LDREST t0, REST(0)(src), l_exc_copy) EXC( LDREST t1, REST(1)(src), l_exc_copy) EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy) EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy) EXC( LDREST t2, REST(2)(src), l_exc_copy) EXC( LDREST t3, REST(3)(src), l_exc_copy) ADD src, src, 4*NBYTES EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) bne len, rem, 1b ADD dst, dst, 4*NBYTES cleanup_src_unaligned: beqz len, done and rem, len, NBYTES-1 # rem = len % NBYTES beq rem, len, copy_bytes nop 1: EXC( LDFIRST t0, FIRST(0)(src), l_exc) EXC( LDREST t0, REST(0)(src), l_exc_copy) SUB len, len, NBYTES EXC( STORE t0, 0(dst), s_exc_p1u) ADD src, src, NBYTES bne len, rem, 1b ADD dst, dst, NBYTES copy_bytes_checklen: beqz len, done nop copy_bytes: /* 0 < len < NBYTES */ #define COPY_BYTE(N) \ EXC( lb t0, N(src), l_exc); \ SUB len, len, 1; \ beqz len, done; \ EXC( sb t0, N(dst), s_exc_p1) COPY_BYTE(0) COPY_BYTE(1) COPY_BYTE(2) COPY_BYTE(3) COPY_BYTE(4) COPY_BYTE(5) EXC( lb t0, NBYTES-2(src), l_exc) SUB len, len, 1 jr ra EXC( sb t0, NBYTES-2(dst), s_exc_p1) done: jr ra nop END(memcpy) l_exc_copy_rewind16: /* Rewind src and dst by 16*NBYTES for l_exc_copy */ SUB src, src, 16*NBYTES SUB dst, dst, 16*NBYTES l_exc_copy: /* * Copy bytes from src until faulting load address (or until a * lb faults) * * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) * may be more than a byte beyond the last address. * Hence, the lb below may get an exception. * * Assumes src < THREAD_BUADDR($28) */ LOAD t0, TI_TASK($28) LOAD t0, THREAD_BUADDR(t0) 1: EXC( lb t1, 0(src), l_exc) ADD src, src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user bne src, t0, 1b ADD dst, dst, 1 l_exc: LOAD t0, TI_TASK($28) LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address SUB len, AT, t0 # len number of uncopied bytes jr ra nop #define SEXC(n) \ s_exc_p ## n ## u: \ jr ra; \ ADD len, len, n*NBYTES SEXC(16) SEXC(15) SEXC(14) SEXC(13) SEXC(12) SEXC(11) SEXC(10) SEXC(9) SEXC(8) SEXC(7) SEXC(6) SEXC(5) SEXC(4) SEXC(3) SEXC(2) SEXC(1) s_exc_p1: jr ra ADD len, len, 1 s_exc: jr ra nop .align 5 LEAF(memmove) EXPORT_SYMBOL(memmove) ADD t0, a0, a2 ADD t1, a1, a2 sltu t0, a1, t0 # dst + len <= src -> memcpy sltu t1, a0, t1 # dst >= src + len -> memcpy and t0, t1 beqz t0, __memcpy move v0, a0 /* return value */ beqz a2, r_out END(memmove) /* fall through to __rmemcpy */ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ sltu t0, a1, a0 beqz t0, r_end_bytes_up # src >= dst nop ADD a0, a2 # dst = dst + len ADD a1, a2 # src = src + len r_end_bytes: lb t0, -1(a1) SUB a2, a2, 0x1 sb t0, -1(a0) SUB a1, a1, 0x1 bnez a2, r_end_bytes SUB a0, a0, 0x1 r_out: jr ra move a2, zero r_end_bytes_up: lb t0, (a1) SUB a2, a2, 0x1 sb t0, (a0) ADD a1, a1, 0x1 bnez a2, r_end_bytes_up ADD a0, a0, 0x1 jr ra move a2, zero END(__rmemcpy)
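
The EXC() wrapper above is what wires each access into the kernel exception
table. Expanding one use by hand, EXC( LOAD t0, UNIT(0)(src), l_exc) with
this file's definitions (LOAD = ld, src = a1, UNIT(0) = 0) comes out as:

9:	ld	t0, 0(a1)		# the access that may fault
	.section __ex_table,"a"
	PTR_WD	9b, l_exc		# a fault at 9b resumes at l_exc
	.previous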
aixcc-public/challenge-001-exemplar-source
1,439
arch/mips/lib/strnlen_user.S
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
 * Copyright (c) 1999 Silicon Graphics, Inc.
 */
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/regdef.h>

#define EX(insn,reg,addr,handler)			\
9:	insn	reg, addr;				\
	.section __ex_table,"a";			\
	PTR_WD	9b, handler;				\
	.previous

/*
 * Return the size of a string including the ending NUL character up to a
 * maximum of a1 or 0 in case of error.
 *
 * Note: for performance reasons we deliberately accept that a user may
 *	 make strlen_user and strnlen_user access the first few KSEG0
 *	 bytes.  There's nothing secret there.  On 64-bit accessing beyond
 *	 the maximum is a tad hairier ...
 */
LEAF(__strnlen_user_asm)
	move	v0, a0
	PTR_ADDU a1, a0			# stop pointer
1:
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
	li	AT, 1
#endif
	beq	v0, a1, 1f		# limit reached?
#ifdef CONFIG_EVA
	.set	push
	.set	eva
	EX(lbe, t0, (v0), .Lfault)
	.set	pop
#else
	EX(lb, t0, (v0), .Lfault)
#endif
	.set	noreorder
	bnez	t0, 1b
1:
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	 PTR_ADDIU v0, 1
#else
	 PTR_ADDU v0, AT
	.set	at
#endif
	.set	reorder
	PTR_SUBU v0, a0
	jr	ra
	END(__strnlen_user_asm)

.Lfault:
	move	v0, zero
	jr	ra
	EXPORT_SYMBOL(__strnlen_user_asm)
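
A hedged sketch of the calling contract documented above; the .Lefault label
and the assumption that a0/a1 are preloaded are illustrative only:

	# a0 = user string pointer, a1 = byte limit (assumed preloaded)
	jal	__strnlen_user_asm
	beqz	v0, .Lefault		# v0 == 0 -> the user access faulted
	# otherwise v0 = length including the NUL, bounded by the a1 limit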
aixcc-public/challenge-001-exemplar-source
17,966
arch/mips/lib/memcpy.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Unified implementation of memcpy, memmove and the __copy_user backend. * * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc. * Copyright (C) 2002 Broadcom, Inc. * memcpy/copy_user author: Mark Vandevoorde * Copyright (C) 2007 Maciej W. Rozycki * Copyright (C) 2014 Imagination Technologies Ltd. * * Mnemonic names for arguments to memcpy/__copy_user */ /* * Hack to resolve longstanding prefetch issue * * Prefetching may be fatal on some systems if we're prefetching beyond the * end of memory on some systems. It's also a seriously bad idea on non * dma-coherent systems. */ #ifdef CONFIG_DMA_NONCOHERENT #undef CONFIG_CPU_HAS_PREFETCH #endif #ifdef CONFIG_MIPS_MALTA #undef CONFIG_CPU_HAS_PREFETCH #endif #ifdef CONFIG_CPU_MIPSR6 #undef CONFIG_CPU_HAS_PREFETCH #endif #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #define dst a0 #define src a1 #define len a2 /* * Spec * * memcpy copies len bytes from src to dst and sets v0 to dst. * It assumes that * - src and dst don't overlap * - src is readable * - dst is writable * memcpy uses the standard calling convention * * __copy_user copies up to len bytes from src to dst and sets a2 (len) to * the number of uncopied bytes due to an exception caused by a read or write. * __copy_user assumes that src and dst don't overlap, and that the call is * implementing one of the following: * copy_to_user * - src is readable (no exceptions when reading src) * copy_from_user * - dst is writable (no exceptions when writing dst) * __copy_user uses a non-standard calling convention; see * include/asm-mips/uaccess.h * * When an exception happens on a load, the handler must # ensure that all of the destination buffer is overwritten to prevent * leaking information to user mode programs. */ /* * Implementation */ /* * The exception handler for loads requires that: * 1- AT contain the address of the byte just past the end of the source * of the copy, * 2- src_entry <= src < AT, and * 3- (dst - src) == (dst_entry - src_entry), * The _entry suffix denotes values when __copy_user was called. * * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user * (2) is met by incrementing src by the number of bytes copied * (3) is met by not doing loads between a pair of increments of dst and src * * The exception handlers for stores adjust len (if necessary) and return. * These handlers do not need to overwrite any data. * * For __rmemcpy and memmove an exception is always a kernel bug, therefore * they're not protected. */ /* Instruction type */ #define LD_INSN 1 #define ST_INSN 2 /* Pretech type */ #define SRC_PREFETCH 1 #define DST_PREFETCH 2 #define LEGACY_MODE 1 #define EVA_MODE 2 #define USEROP 1 #define KERNELOP 2 /* * Wrapper to add an entry in the exception table * in case the insn causes a memory exception. 
* Arguments: * insn : Load/store instruction * type : Instruction type * reg : Register * addr : Address * handler : Exception handler */ #define EXC(insn, type, reg, addr, handler) \ .if \mode == LEGACY_MODE; \ 9: insn reg, addr; \ .section __ex_table,"a"; \ PTR_WD 9b, handler; \ .previous; \ /* This is assembled in EVA mode */ \ .else; \ /* If loading from user or storing to user */ \ .if ((\from == USEROP) && (type == LD_INSN)) || \ ((\to == USEROP) && (type == ST_INSN)); \ 9: __BUILD_EVA_INSN(insn##e, reg, addr); \ .section __ex_table,"a"; \ PTR_WD 9b, handler; \ .previous; \ .else; \ /* \ * Still in EVA, but no need for \ * exception handler or EVA insn \ */ \ insn reg, addr; \ .endif; \ .endif /* * Only on the 64-bit kernel we can made use of 64-bit registers. */ #ifdef CONFIG_64BIT #define USE_DOUBLE #endif #ifdef USE_DOUBLE #define LOADK ld /* No exception */ #define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler) #define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler) #define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler) #define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler) #define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler) #define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler) #define ADD daddu #define SUB dsubu #define SRL dsrl #define SRA dsra #define SLL dsll #define SLLV dsllv #define SRLV dsrlv #define NBYTES 8 #define LOG_NBYTES 3 /* * As we are sharing code base with the mips32 tree (which use the o32 ABI * register definitions). We need to redefine the register definitions from * the n64 ABI register naming to the o32 ABI register naming. */ #undef t0 #undef t1 #undef t2 #undef t3 #define t0 $8 #define t1 $9 #define t2 $10 #define t3 $11 #define t4 $12 #define t5 $13 #define t6 $14 #define t7 $15 #else #define LOADK lw /* No exception */ #define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler) #define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler) #define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler) #define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler) #define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler) #define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler) #define ADD addu #define SUB subu #define SRL srl #define SLL sll #define SRA sra #define SLLV sllv #define SRLV srlv #define NBYTES 4 #define LOG_NBYTES 2 #endif /* USE_DOUBLE */ #define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler) #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) #ifdef CONFIG_CPU_HAS_PREFETCH # define _PREF(hint, addr, type) \ .if \mode == LEGACY_MODE; \ kernel_pref(hint, addr); \ .else; \ .if ((\from == USEROP) && (type == SRC_PREFETCH)) || \ ((\to == USEROP) && (type == DST_PREFETCH)); \ /* \ * PREFE has only 9 bits for the offset \ * compared to PREF which has 16, so it may \ * need to use the $at register but this \ * register should remain intact because it's \ * used later on. Therefore use $v1. 
\ */ \ .set at=v1; \ user_pref(hint, addr); \ .set noat; \ .else; \ kernel_pref(hint, addr); \ .endif; \ .endif #else # define _PREF(hint, addr, type) #endif #define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH) #define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH) #ifdef CONFIG_CPU_LITTLE_ENDIAN #define LDFIRST LOADR #define LDREST LOADL #define STFIRST STORER #define STREST STOREL #define SHIFT_DISCARD SLLV #else #define LDFIRST LOADL #define LDREST LOADR #define STFIRST STOREL #define STREST STORER #define SHIFT_DISCARD SRLV #endif #define FIRST(unit) ((unit)*NBYTES) #define REST(unit) (FIRST(unit)+NBYTES-1) #define UNIT(unit) FIRST(unit) #define ADDRMASK (NBYTES-1) .text .set noreorder #ifndef CONFIG_CPU_DADDI_WORKAROUNDS .set noat #else .set at=v1 #endif .align 5 /* * Macro to build the __copy_user common code * Arguments: * mode : LEGACY_MODE or EVA_MODE * from : Source operand. USEROP or KERNELOP * to : Destination operand. USEROP or KERNELOP */ .macro __BUILD_COPY_USER mode, from, to /* initialize __memcpy if this the first time we execute this macro */ .ifnotdef __memcpy .set __memcpy, 1 .hidden __memcpy /* make sure it does not leak */ .endif /* * Note: dst & src may be unaligned, len may be 0 * Temps */ #define rem t8 R10KCBARRIER(0(ra)) /* * The "issue break"s below are very approximate. * Issue delays for dcache fills will perturb the schedule, as will * load queue full replay traps, etc. * * If len < NBYTES use byte operations. */ PREFS( 0, 0(src) ) PREFD( 1, 0(dst) ) sltu t2, len, NBYTES and t1, dst, ADDRMASK PREFS( 0, 1*32(src) ) PREFD( 1, 1*32(dst) ) bnez t2, .Lcopy_bytes_checklen\@ and t0, src, ADDRMASK PREFS( 0, 2*32(src) ) PREFD( 1, 2*32(dst) ) #ifndef CONFIG_CPU_NO_LOAD_STORE_LR bnez t1, .Ldst_unaligned\@ nop bnez t0, .Lsrc_unaligned_dst_aligned\@ #else /* CONFIG_CPU_NO_LOAD_STORE_LR */ or t0, t0, t1 bnez t0, .Lcopy_unaligned_bytes\@ #endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ /* * use delay slot for fall-through * src and dst are aligned; need to compute rem */ .Lboth_aligned\@: SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) PREFS( 0, 3*32(src) ) PREFD( 1, 3*32(dst) ) .align 4 1: R10KCBARRIER(0(ra)) LOAD(t0, UNIT(0)(src), .Ll_exc\@) LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) SUB len, len, 8*NBYTES LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@) LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@) STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@) STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@) LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@) LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@) ADD src, src, 8*NBYTES ADD dst, dst, 8*NBYTES STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@) STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@) STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@) STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@) STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@) STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@) PREFS( 0, 8*32(src) ) PREFD( 1, 8*32(dst) ) bne len, rem, 1b nop /* * len == rem == the number of bytes left to copy < 8*NBYTES */ .Lcleanup_both_aligned\@: beqz len, .Ldone\@ sltu t0, len, 4*NBYTES bnez t0, .Lless_than_4units\@ and rem, len, (NBYTES-1) # rem = len % NBYTES /* * len >= 4*NBYTES */ LOAD( t0, UNIT(0)(src), .Ll_exc\@) LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@) LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@) LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@) SUB len, len, 4*NBYTES ADD src, src, 4*NBYTES R10KCBARRIER(0(ra)) STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@) STORE(t1, UNIT(1)(dst), 
.Ls_exc_p3u\@) STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@) STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@) .set reorder /* DADDI_WAR */ ADD dst, dst, 4*NBYTES beqz len, .Ldone\@ .set noreorder .Lless_than_4units\@: /* * rem = len % NBYTES */ beq rem, len, .Lcopy_bytes\@ nop 1: R10KCBARRIER(0(ra)) LOAD(t0, 0(src), .Ll_exc\@) ADD src, src, NBYTES SUB len, len, NBYTES STORE(t0, 0(dst), .Ls_exc_p1u\@) .set reorder /* DADDI_WAR */ ADD dst, dst, NBYTES bne rem, len, 1b .set noreorder #ifndef CONFIG_CPU_NO_LOAD_STORE_LR /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) * A loop would do only a byte at a time with possible branch * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE * because can't assume read-access to dst. Instead, use * STREST dst, which doesn't require read access to dst. * * This code should perform better than a simple loop on modern, * wide-issue mips processors because the code has fewer branches and * more instruction-level parallelism. */ #define bits t2 beqz len, .Ldone\@ ADD t1, dst, len # t1 is just past last byte of dst li bits, 8*NBYTES SLL rem, len, 3 # rem = number of bits to keep LOAD(t0, 0(src), .Ll_exc\@) SUB bits, bits, rem # bits = number of bits to discard SHIFT_DISCARD t0, t0, bits STREST(t0, -1(t1), .Ls_exc\@) jr ra move len, zero .Ldst_unaligned\@: /* * dst is unaligned * t0 = src & ADDRMASK * t1 = dst & ADDRMASK; T1 > 0 * len >= NBYTES * * Copy enough bytes to align dst * Set match = (src and dst have same alignment) */ #define match rem LDFIRST(t3, FIRST(0)(src), .Ll_exc\@) ADD t2, zero, NBYTES LDREST(t3, REST(0)(src), .Ll_exc_copy\@) SUB t2, t2, t1 # t2 = number of bytes copied xor match, t0, t1 R10KCBARRIER(0(ra)) STFIRST(t3, FIRST(0)(dst), .Ls_exc\@) beq len, t2, .Ldone\@ SUB len, len, t2 ADD dst, dst, t2 beqz match, .Lboth_aligned\@ ADD src, src, t2 .Lsrc_unaligned_dst_aligned\@: SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter PREFS( 0, 3*32(src) ) beqz t0, .Lcleanup_src_unaligned\@ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES PREFD( 1, 3*32(dst) ) 1: /* * Avoid consecutive LD*'s to the same register since some mips * implementations can't issue them in the same cycle. * It's OK to load FIRST(N+1) before REST(N) because the two addresses * are to the same unit (unless src is aligned, but it's not). 
*/ R10KCBARRIER(0(ra)) LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@) SUB len, len, 4*NBYTES LDREST(t0, REST(0)(src), .Ll_exc_copy\@) LDREST(t1, REST(1)(src), .Ll_exc_copy\@) LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@) LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@) LDREST(t2, REST(2)(src), .Ll_exc_copy\@) LDREST(t3, REST(3)(src), .Ll_exc_copy\@) PREFS( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) ADD src, src, 4*NBYTES #ifdef CONFIG_CPU_SB1 nop # improves slotting #endif STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@) STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@) STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@) STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@) PREFD( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) .set reorder /* DADDI_WAR */ ADD dst, dst, 4*NBYTES bne len, rem, 1b .set noreorder .Lcleanup_src_unaligned\@: beqz len, .Ldone\@ and rem, len, NBYTES-1 # rem = len % NBYTES beq rem, len, .Lcopy_bytes\@ nop 1: R10KCBARRIER(0(ra)) LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) LDREST(t0, REST(0)(src), .Ll_exc_copy\@) ADD src, src, NBYTES SUB len, len, NBYTES STORE(t0, 0(dst), .Ls_exc_p1u\@) .set reorder /* DADDI_WAR */ ADD dst, dst, NBYTES bne len, rem, 1b .set noreorder #endif /* !CONFIG_CPU_NO_LOAD_STORE_LR */ .Lcopy_bytes_checklen\@: beqz len, .Ldone\@ nop .Lcopy_bytes\@: /* 0 < len < NBYTES */ R10KCBARRIER(0(ra)) #define COPY_BYTE(N) \ LOADB(t0, N(src), .Ll_exc\@); \ SUB len, len, 1; \ beqz len, .Ldone\@; \ STOREB(t0, N(dst), .Ls_exc_p1\@) COPY_BYTE(0) COPY_BYTE(1) #ifdef USE_DOUBLE COPY_BYTE(2) COPY_BYTE(3) COPY_BYTE(4) COPY_BYTE(5) #endif LOADB(t0, NBYTES-2(src), .Ll_exc\@) SUB len, len, 1 jr ra STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) .Ldone\@: jr ra nop #ifdef CONFIG_CPU_NO_LOAD_STORE_LR .Lcopy_unaligned_bytes\@: 1: COPY_BYTE(0) COPY_BYTE(1) COPY_BYTE(2) COPY_BYTE(3) COPY_BYTE(4) COPY_BYTE(5) COPY_BYTE(6) COPY_BYTE(7) ADD src, src, 8 b 1b ADD dst, dst, 8 #endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ .if __memcpy == 1 END(memcpy) .set __memcpy, 0 .hidden __memcpy .endif .Ll_exc_copy\@: /* * Copy bytes from src until faulting load address (or until a * lb faults) * * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) * may be more than a byte beyond the last address. * Hence, the lb below may get an exception. 
* * Assumes src < THREAD_BUADDR($28) */ LOADK t0, TI_TASK($28) nop LOADK t0, THREAD_BUADDR(t0) 1: LOADB(t1, 0(src), .Ll_exc\@) ADD src, src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user .set reorder /* DADDI_WAR */ ADD dst, dst, 1 bne src, t0, 1b .set noreorder .Ll_exc\@: LOADK t0, TI_TASK($28) nop LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address nop SUB len, AT, t0 # len number of uncopied bytes jr ra nop #define SEXC(n) \ .set reorder; /* DADDI_WAR */ \ .Ls_exc_p ## n ## u\@: \ ADD len, len, n*NBYTES; \ jr ra; \ .set noreorder SEXC(8) SEXC(7) SEXC(6) SEXC(5) SEXC(4) SEXC(3) SEXC(2) SEXC(1) .Ls_exc_p1\@: .set reorder /* DADDI_WAR */ ADD len, len, 1 jr ra .set noreorder .Ls_exc\@: jr ra nop .endm #ifndef CONFIG_HAVE_PLAT_MEMCPY .align 5 LEAF(memmove) EXPORT_SYMBOL(memmove) ADD t0, a0, a2 ADD t1, a1, a2 sltu t0, a1, t0 # dst + len <= src -> memcpy sltu t1, a0, t1 # dst >= src + len -> memcpy and t0, t1 beqz t0, .L__memcpy move v0, a0 /* return value */ beqz a2, .Lr_out END(memmove) /* fall through to __rmemcpy */ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ sltu t0, a1, a0 beqz t0, .Lr_end_bytes_up # src >= dst nop ADD a0, a2 # dst = dst + len ADD a1, a2 # src = src + len .Lr_end_bytes: R10KCBARRIER(0(ra)) lb t0, -1(a1) SUB a2, a2, 0x1 sb t0, -1(a0) SUB a1, a1, 0x1 .set reorder /* DADDI_WAR */ SUB a0, a0, 0x1 bnez a2, .Lr_end_bytes .set noreorder .Lr_out: jr ra move a2, zero .Lr_end_bytes_up: R10KCBARRIER(0(ra)) lb t0, (a1) SUB a2, a2, 0x1 sb t0, (a0) ADD a1, a1, 0x1 .set reorder /* DADDI_WAR */ ADD a0, a0, 0x1 bnez a2, .Lr_end_bytes_up .set noreorder jr ra move a2, zero END(__rmemcpy) /* * A combined memcpy/__copy_user * __copy_user sets len to 0 for success; else to an upper bound of * the number of uncopied bytes. * memcpy sets v0 to dst. */ .align 5 LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ .L__memcpy: #ifndef CONFIG_EVA FEXPORT(__raw_copy_from_user) EXPORT_SYMBOL(__raw_copy_from_user) FEXPORT(__raw_copy_to_user) EXPORT_SYMBOL(__raw_copy_to_user) #endif /* Legacy Mode, user <-> user */ __BUILD_COPY_USER LEGACY_MODE USEROP USEROP #endif #ifdef CONFIG_EVA /* * For EVA we need distinct symbols for reading and writing to user space. * This is because we need to use specific EVA instructions to perform the * virtual <-> physical translation when a virtual address is actually in user * space */ /* * __copy_from_user (EVA) */ LEAF(__raw_copy_from_user) EXPORT_SYMBOL(__raw_copy_from_user) __BUILD_COPY_USER EVA_MODE USEROP KERNELOP END(__raw_copy_from_user) /* * __copy_to_user (EVA) */ LEAF(__raw_copy_to_user) EXPORT_SYMBOL(__raw_copy_to_user) __BUILD_COPY_USER EVA_MODE KERNELOP USEROP END(__raw_copy_to_user) #endif
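
To see what the EVA branch of the EXC() macro above changes, here is a hand
expansion for the __raw_copy_from_user instantiation (EVA_MODE, from =
USEROP, to = KERNELOP) on a 32-bit build; "handler" stands in for the
per-instantiation fixup label:

	# EXC(lw, LD_INSN, t0, 0(src), handler) on the user-load side:
9:	lwe	t0, 0(a1)		# EVA load from the user address space
	.section __ex_table,"a"
	PTR_WD	9b, handler
	.previous
	# The matching kernel-side store is emitted as a plain sw with no
	# exception-table entry, since the KERNELOP store cannot fault.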
aixcc-public/challenge-001-exemplar-source
7,682
arch/mips/lib/memset.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998, 1999, 2000 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2007 by Maciej W. Rozycki * Copyright (C) 2011, 2012 MIPS Technologies, Inc. */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #if LONGSIZE == 4 #define LONG_S_L swl #define LONG_S_R swr #else #define LONG_S_L sdl #define LONG_S_R sdr #endif #ifdef CONFIG_CPU_MICROMIPS #define STORSIZE (LONGSIZE * 2) #define STORMASK (STORSIZE - 1) #define FILL64RG t8 #define FILLPTRG t7 #undef LONG_S #define LONG_S LONG_SP #else #define STORSIZE LONGSIZE #define STORMASK LONGMASK #define FILL64RG a1 #define FILLPTRG t0 #endif #define LEGACY_MODE 1 #define EVA_MODE 2 /* * No need to protect it with EVA #ifdefery. The generated block of code * will never be assembled if EVA is not enabled. */ #define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr) #define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr) #define EX(insn,reg,addr,handler) \ .if \mode == LEGACY_MODE; \ 9: insn reg, addr; \ .else; \ 9: ___BUILD_EVA_INSN(insn, reg, addr); \ .endif; \ .section __ex_table,"a"; \ PTR_WD 9b, handler; \ .previous .macro f_fill64 dst, offset, val, fixup, mode EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 3 * STORSIZE)(\dst), \fixup) #if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS)) EX(LONG_S, \val, (\offset + 4 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 5 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 6 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 7 * STORSIZE)(\dst), \fixup) #endif #if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) EX(LONG_S, \val, (\offset + 8 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 9 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup) #endif .endm .align 5 /* * Macro to generate the __bzero{,_user} symbol * Arguments: * mode: LEGACY_MODE or EVA_MODE */ .macro __BUILD_BZERO mode /* Initialize __memset if this is the first time we call this macro */ .ifnotdef __memset .set __memset, 1 .hidden __memset /* Make sure it does not leak */ .endif sltiu t0, a2, STORSIZE /* very small region? */ .set noreorder bnez t0, .Lsmall_memset\@ andi t0, a0, STORMASK /* aligned? 
*/ .set reorder #ifdef CONFIG_CPU_MICROMIPS move t8, a1 /* used by 'swp' instruction */ move t9, a1 #endif .set noreorder #ifndef CONFIG_CPU_DADDI_WORKAROUNDS beqz t0, 1f PTR_SUBU t0, STORSIZE /* alignment in bytes */ #else .set noat li AT, STORSIZE beqz t0, 1f PTR_SUBU t0, AT /* alignment in bytes */ .set at #endif .set reorder #ifndef CONFIG_CPU_NO_LOAD_STORE_LR R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ #else EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ #endif PTR_SUBU a0, t0 /* long align ptr */ PTR_ADDU a2, t0 /* correct size */ #else /* CONFIG_CPU_NO_LOAD_STORE_LR */ #define STORE_BYTE(N) \ EX(sb, a1, N(a0), .Lbyte_fixup\@); \ .set noreorder; \ beqz t0, 0f; \ PTR_ADDU t0, 1; \ .set reorder; PTR_ADDU a2, t0 /* correct size */ PTR_ADDU t0, 1 STORE_BYTE(0) STORE_BYTE(1) #if LONGSIZE == 4 EX(sb, a1, 2(a0), .Lbyte_fixup\@) #else STORE_BYTE(2) STORE_BYTE(3) STORE_BYTE(4) STORE_BYTE(5) EX(sb, a1, 6(a0), .Lbyte_fixup\@) #endif 0: ori a0, STORMASK xori a0, STORMASK PTR_ADDIU a0, STORSIZE #endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ 1: ori t1, a2, 0x3f /* # of full blocks */ xori t1, 0x3f andi t0, a2, 0x40-STORSIZE beqz t1, .Lmemset_partial\@ /* no block to fill */ PTR_ADDU t1, a0 /* end address */ 1: PTR_ADDIU a0, 64 R10KCBARRIER(0(ra)) f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode bne t1, a0, 1b .Lmemset_partial\@: R10KCBARRIER(0(ra)) PTR_LA t1, 2f /* where to start */ #ifdef CONFIG_CPU_MICROMIPS LONG_SRL t7, t0, 1 #endif #if LONGSIZE == 4 PTR_SUBU t1, FILLPTRG #else .set noat LONG_SRL AT, FILLPTRG, 1 PTR_SUBU t1, AT .set at #endif PTR_ADDU a0, t0 /* dest ptr */ jr t1 /* ... but first do longs ... */ f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode 2: andi a2, STORMASK /* At most one long to go */ .set noreorder beqz a2, 1f #ifndef CONFIG_CPU_NO_LOAD_STORE_LR PTR_ADDU a0, a2 /* What's left */ .set reorder R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) #else EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) #endif #else /* CONFIG_CPU_NO_LOAD_STORE_LR */ PTR_SUBU t0, $0, a2 .set reorder move a2, zero /* No remaining longs */ PTR_ADDIU t0, 1 STORE_BYTE(0) STORE_BYTE(1) #if LONGSIZE == 4 EX(sb, a1, 2(a0), .Lbyte_fixup\@) #else STORE_BYTE(2) STORE_BYTE(3) STORE_BYTE(4) STORE_BYTE(5) EX(sb, a1, 6(a0), .Lbyte_fixup\@) #endif 0: #endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ 1: move a2, zero jr ra .Lsmall_memset\@: PTR_ADDU t1, a0, a2 beqz a2, 2f 1: PTR_ADDIU a0, 1 /* fill bytewise */ R10KCBARRIER(0(ra)) .set noreorder bne t1, a0, 1b EX(sb, a1, -1(a0), .Lsmall_fixup\@) .set reorder 2: move a2, zero jr ra /* done */ .if __memset == 1 END(memset) .set __memset, 0 .hidden __memset .endif #ifdef CONFIG_CPU_NO_LOAD_STORE_LR .Lbyte_fixup\@: /* * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1 * a2 = a2 - t0 + 1 */ PTR_SUBU a2, t0 PTR_ADDIU a2, 1 jr ra #endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ .Lfirst_fixup\@: /* unset_bytes already in a2 */ jr ra .Lfwd_fixup\@: /* * unset_bytes = partial_start_addr + #bytes - fault_addr * a2 = t1 + (a2 & 3f) - $28->task->BUADDR */ PTR_L t0, TI_TASK($28) andi a2, 0x3f LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, t1 LONG_SUBU a2, t0 jr ra .Lpartial_fixup\@: /* * unset_bytes = partial_end_addr + #bytes - fault_addr * a2 = a0 + (a2 & STORMASK) - $28->task->BUADDR */ PTR_L t0, TI_TASK($28) andi a2, STORMASK LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, a0 LONG_SUBU a2, t0 jr ra .Llast_fixup\@: /* unset_bytes already in a2 */ jr ra 
.Lsmall_fixup\@: /* * unset_bytes = end_addr - current_addr + 1 * a2 = t1 - a0 + 1 */ PTR_SUBU a2, t1, a0 PTR_ADDIU a2, 1 jr ra .endm /* * memset(void *s, int c, size_t n) * * a0: start of area to clear * a1: char to fill with * a2: size of area to clear */ LEAF(memset) EXPORT_SYMBOL(memset) move v0, a0 /* result */ beqz a1, 1f andi a1, 0xff /* spread fillword */ LONG_SLL t1, a1, 8 or a1, t1 LONG_SLL t1, a1, 16 #if LONGSIZE == 8 or a1, t1 LONG_SLL t1, a1, 32 #endif or a1, t1 1: #ifndef CONFIG_EVA FEXPORT(__bzero) EXPORT_SYMBOL(__bzero) #endif __BUILD_BZERO LEGACY_MODE #ifdef CONFIG_EVA LEAF(__bzero) EXPORT_SYMBOL(__bzero) __BUILD_BZERO EVA_MODE END(__bzero) #endif
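memset above widens the fill byte into a full machine word with a shift/or ladder before entering the block-store loops. The same ladder in C, assuming the 64-bit LONGSIZE == 8 case:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fill = 0xab;		/* only the low byte, like $a1 */

	fill |= fill << 8;		/* 0x000000000000abab */
	fill |= fill << 16;		/* 0x00000000abababab */
	fill |= fill << 32;		/* 0xabababababababab */
	printf("fill word = 0x%016llx\n", (unsigned long long)fill);
	return 0;
}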
aixcc-public/challenge-001-exemplar-source
1,417
arch/mips/lib/strncpy_user.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996, 1999 by Ralf Baechle * Copyright (C) 2011 MIPS Technologies, Inc. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #define EX(insn,reg,addr,handler) \ 9: insn reg, addr; \ .section __ex_table,"a"; \ PTR_WD 9b, handler; \ .previous /* * Returns: -EFAULT if exception before terminator, N if the entire * buffer filled, else strlen. */ /* * Ugly special case we have to check: we might get passed a user space * pointer which wraps into the kernel space. We don't deal with that. If * it happens, at most some bytes of the exception handlers will be copied. */ LEAF(__strncpy_from_user_asm) move t0, zero move v1, a1 #ifdef CONFIG_EVA .set push .set eva 1: EX(lbue, v0, (v1), .Lfault) .set pop #else 1: EX(lbu, v0, (v1), .Lfault) #endif PTR_ADDIU v1, 1 R10KCBARRIER(0(ra)) sb v0, (a0) beqz v0, 2f PTR_ADDIU t0, 1 PTR_ADDIU a0, 1 bne t0, a2, 1b 2: PTR_ADDU v0, a1, t0 xor v0, a1 bltz v0, .Lfault move v0, t0 jr ra # return n END(__strncpy_from_user_asm) .Lfault: li v0, -EFAULT jr ra .section __ex_table,"a" PTR_WD 1b, .Lfault .previous EXPORT_SYMBOL(__strncpy_from_user_asm)
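A hedged C model of __strncpy_from_user_asm's return contract; fake_strncpy_from_user is illustrative, and the fault path that yields -EFAULT is not modeled:

#include <stdio.h>

/* Mimics the contract: n if the terminator did not fit,
 * else strlen(src); a real fault would return -EFAULT instead. */
static long fake_strncpy_from_user(char *dst, const char *src, long n)
{
	long i;

	for (i = 0; i < n; i++) {
		dst[i] = src[i];
		if (!src[i])
			return i;	/* strlen, terminator copied */
	}
	return n;			/* buffer filled, no terminator */
}

int main(void)
{
	char buf[4];
	long r = fake_strncpy_from_user(buf, "hi", sizeof(buf));

	if (r < 0)
		printf("fault\n");
	else if (r == (long)sizeof(buf))
		printf("name truncated\n");
	else
		printf("got %ld chars: %s\n", r, buf);
	return 0;
}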
aixcc-public/challenge-001-exemplar-source
16,274
arch/mips/lib/csum_partial.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Quick'n'dirty IP checksum ... * * Copyright (C) 1998, 1999 Ralf Baechle * Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 2007 Maciej W. Rozycki * Copyright (C) 2014 Imagination Technologies Ltd. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #ifdef CONFIG_64BIT /* * As we are sharing code base with the mips32 tree (which use the o32 ABI * register definitions). We need to redefine the register definitions from * the n64 ABI register naming to the o32 ABI register naming. */ #undef t0 #undef t1 #undef t2 #undef t3 #define t0 $8 #define t1 $9 #define t2 $10 #define t3 $11 #define t4 $12 #define t5 $13 #define t6 $14 #define t7 $15 #define USE_DOUBLE #endif #ifdef USE_DOUBLE #define LOAD ld #define LOAD32 lwu #define ADD daddu #define NBYTES 8 #else #define LOAD lw #define LOAD32 lw #define ADD addu #define NBYTES 4 #endif /* USE_DOUBLE */ #define UNIT(unit) ((unit)*NBYTES) #define ADDC(sum,reg) \ .set push; \ .set noat; \ ADD sum, reg; \ sltu v1, sum, reg; \ ADD sum, v1; \ .set pop #define ADDC32(sum,reg) \ .set push; \ .set noat; \ addu sum, reg; \ sltu v1, sum, reg; \ addu sum, v1; \ .set pop #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ LOAD _t0, (offset + UNIT(0))(src); \ LOAD _t1, (offset + UNIT(1))(src); \ LOAD _t2, (offset + UNIT(2))(src); \ LOAD _t3, (offset + UNIT(3))(src); \ ADDC(_t0, _t1); \ ADDC(_t2, _t3); \ ADDC(sum, _t0); \ ADDC(sum, _t2) #ifdef USE_DOUBLE #define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \ CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) #else #define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \ CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3); \ CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3) #endif /* * a0: source address * a1: length of the area to checksum * a2: partial checksum */ #define src a0 #define sum v0 .text .set noreorder .align 5 LEAF(csum_partial) EXPORT_SYMBOL(csum_partial) move sum, zero move t7, zero sltiu t8, a1, 0x8 bnez t8, .Lsmall_csumcpy /* < 8 bytes to copy */ move t2, a1 andi t7, src, 0x1 /* odd buffer? 
*/ .Lhword_align: beqz t7, .Lword_align andi t8, src, 0x2 lbu t0, (src) LONG_SUBU a1, a1, 0x1 #ifdef __MIPSEL__ sll t0, t0, 8 #endif ADDC(sum, t0) PTR_ADDU src, src, 0x1 andi t8, src, 0x2 .Lword_align: beqz t8, .Ldword_align sltiu t8, a1, 56 lhu t0, (src) LONG_SUBU a1, a1, 0x2 ADDC(sum, t0) sltiu t8, a1, 56 PTR_ADDU src, src, 0x2 .Ldword_align: bnez t8, .Ldo_end_words move t8, a1 andi t8, src, 0x4 beqz t8, .Lqword_align andi t8, src, 0x8 LOAD32 t0, 0x00(src) LONG_SUBU a1, a1, 0x4 ADDC(sum, t0) PTR_ADDU src, src, 0x4 andi t8, src, 0x8 .Lqword_align: beqz t8, .Loword_align andi t8, src, 0x10 #ifdef USE_DOUBLE ld t0, 0x00(src) LONG_SUBU a1, a1, 0x8 ADDC(sum, t0) #else lw t0, 0x00(src) lw t1, 0x04(src) LONG_SUBU a1, a1, 0x8 ADDC(sum, t0) ADDC(sum, t1) #endif PTR_ADDU src, src, 0x8 andi t8, src, 0x10 .Loword_align: beqz t8, .Lbegin_movement LONG_SRL t8, a1, 0x7 #ifdef USE_DOUBLE ld t0, 0x00(src) ld t1, 0x08(src) ADDC(sum, t0) ADDC(sum, t1) #else CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4) #endif LONG_SUBU a1, a1, 0x10 PTR_ADDU src, src, 0x10 LONG_SRL t8, a1, 0x7 .Lbegin_movement: beqz t8, 1f andi t2, a1, 0x40 .Lmove_128bytes: CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4) CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4) CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4) LONG_SUBU t8, t8, 0x01 .set reorder /* DADDI_WAR */ PTR_ADDU src, src, 0x80 bnez t8, .Lmove_128bytes .set noreorder 1: beqz t2, 1f andi t2, a1, 0x20 .Lmove_64bytes: CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4) PTR_ADDU src, src, 0x40 1: beqz t2, .Ldo_end_words andi t8, a1, 0x1c .Lmove_32bytes: CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) andi t8, a1, 0x1c PTR_ADDU src, src, 0x20 .Ldo_end_words: beqz t8, .Lsmall_csumcpy andi t2, a1, 0x3 LONG_SRL t8, t8, 0x2 .Lend_words: LOAD32 t0, (src) LONG_SUBU t8, t8, 0x1 ADDC(sum, t0) .set reorder /* DADDI_WAR */ PTR_ADDU src, src, 0x4 bnez t8, .Lend_words .set noreorder /* unknown src alignment and < 8 bytes to go */ .Lsmall_csumcpy: move a1, t2 andi t0, a1, 4 beqz t0, 1f andi t0, a1, 2 /* Still a full word to go */ ulw t1, (src) PTR_ADDIU src, 4 #ifdef USE_DOUBLE dsll t1, t1, 32 /* clear lower 32bit */ #endif ADDC(sum, t1) 1: move t1, zero beqz t0, 1f andi t0, a1, 1 /* Still a halfword to go */ ulhu t1, (src) PTR_ADDIU src, 2 1: beqz t0, 1f sll t1, t1, 16 lbu t2, (src) nop #ifdef __MIPSEB__ sll t2, t2, 8 #endif or t1, t2 1: ADDC(sum, t1) /* fold checksum */ #ifdef USE_DOUBLE dsll32 v1, sum, 0 daddu sum, v1 sltu v1, sum, v1 dsra32 sum, sum, 0 addu sum, v1 #endif /* odd buffer alignment? */ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \ defined(CONFIG_CPU_LOONGSON64) .set push .set arch=mips32r2 wsbh v1, sum movn sum, v1, t7 .set pop #else beqz t7, 1f /* odd buffer alignment? */ lui v1, 0x00ff addu v1, 0x00ff and t0, sum, v1 sll t0, t0, 8 srl sum, sum, 8 and sum, sum, v1 or sum, sum, t0 1: #endif .set reorder /* Add the passed partial csum. */ ADDC32(sum, a2) jr ra .set noreorder END(csum_partial) /* * checksum and copy routines based on memcpy.S * * csum_partial_copy_nocheck(src, dst, len) * __csum_partial_copy_kernel(src, dst, len) * * See "Spec" in memcpy.S for details. Unlike __copy_user, all * function in this file use the standard calling convention. */ #define src a0 #define dst a1 #define len a2 #define sum v0 #define odd t8 /* * All exception handlers simply return 0. 
*/ /* Instruction type */ #define LD_INSN 1 #define ST_INSN 2 #define LEGACY_MODE 1 #define EVA_MODE 2 #define USEROP 1 #define KERNELOP 2 /* * Wrapper to add an entry in the exception table * in case the insn causes a memory exception. * Arguments: * insn : Load/store instruction * type : Instruction type * reg : Register * addr : Address * handler : Exception handler */ #define EXC(insn, type, reg, addr) \ .if \mode == LEGACY_MODE; \ 9: insn reg, addr; \ .section __ex_table,"a"; \ PTR_WD 9b, .L_exc; \ .previous; \ /* This is enabled in EVA mode */ \ .else; \ /* If loading from user or storing to user */ \ .if ((\from == USEROP) && (type == LD_INSN)) || \ ((\to == USEROP) && (type == ST_INSN)); \ 9: __BUILD_EVA_INSN(insn##e, reg, addr); \ .section __ex_table,"a"; \ PTR_WD 9b, .L_exc; \ .previous; \ .else; \ /* EVA without exception */ \ insn reg, addr; \ .endif; \ .endif #undef LOAD #ifdef USE_DOUBLE #define LOADK ld /* No exception */ #define LOAD(reg, addr) EXC(ld, LD_INSN, reg, addr) #define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr) #define LOADL(reg, addr) EXC(ldl, LD_INSN, reg, addr) #define LOADR(reg, addr) EXC(ldr, LD_INSN, reg, addr) #define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr) #define STOREL(reg, addr) EXC(sdl, ST_INSN, reg, addr) #define STORER(reg, addr) EXC(sdr, ST_INSN, reg, addr) #define STORE(reg, addr) EXC(sd, ST_INSN, reg, addr) #define ADD daddu #define SUB dsubu #define SRL dsrl #define SLL dsll #define SLLV dsllv #define SRLV dsrlv #define NBYTES 8 #define LOG_NBYTES 3 #else #define LOADK lw /* No exception */ #define LOAD(reg, addr) EXC(lw, LD_INSN, reg, addr) #define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr) #define LOADL(reg, addr) EXC(lwl, LD_INSN, reg, addr) #define LOADR(reg, addr) EXC(lwr, LD_INSN, reg, addr) #define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr) #define STOREL(reg, addr) EXC(swl, ST_INSN, reg, addr) #define STORER(reg, addr) EXC(swr, ST_INSN, reg, addr) #define STORE(reg, addr) EXC(sw, ST_INSN, reg, addr) #define ADD addu #define SUB subu #define SRL srl #define SLL sll #define SLLV sllv #define SRLV srlv #define NBYTES 4 #define LOG_NBYTES 2 #endif /* USE_DOUBLE */ #ifdef CONFIG_CPU_LITTLE_ENDIAN #define LDFIRST LOADR #define LDREST LOADL #define STFIRST STORER #define STREST STOREL #define SHIFT_DISCARD SLLV #define SHIFT_DISCARD_REVERT SRLV #else #define LDFIRST LOADL #define LDREST LOADR #define STFIRST STOREL #define STREST STORER #define SHIFT_DISCARD SRLV #define SHIFT_DISCARD_REVERT SLLV #endif #define FIRST(unit) ((unit)*NBYTES) #define REST(unit) (FIRST(unit)+NBYTES-1) #define ADDRMASK (NBYTES-1) #ifndef CONFIG_CPU_DADDI_WORKAROUNDS .set noat #else .set at=v1 #endif .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to li sum, -1 move odd, zero /* * Note: dst & src may be unaligned, len may be 0 * Temps */ /* * The "issue break"s below are very approximate. * Issue delays for dcache fills will perturb the schedule, as will * load queue full replay traps, etc. * * If len < NBYTES use byte operations. */ sltu t2, len, NBYTES and t1, dst, ADDRMASK bnez t2, .Lcopy_bytes_checklen\@ and t0, src, ADDRMASK andi odd, dst, 0x1 /* odd buffer? 
*/ bnez t1, .Ldst_unaligned\@ nop bnez t0, .Lsrc_unaligned_dst_aligned\@ /* * use delay slot for fall-through * src and dst are aligned; need to compute rem */ .Lboth_aligned\@: SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES nop SUB len, 8*NBYTES # subtract here for bgez loop .align 4 1: LOAD(t0, UNIT(0)(src)) LOAD(t1, UNIT(1)(src)) LOAD(t2, UNIT(2)(src)) LOAD(t3, UNIT(3)(src)) LOAD(t4, UNIT(4)(src)) LOAD(t5, UNIT(5)(src)) LOAD(t6, UNIT(6)(src)) LOAD(t7, UNIT(7)(src)) SUB len, len, 8*NBYTES ADD src, src, 8*NBYTES STORE(t0, UNIT(0)(dst)) ADDC(t0, t1) STORE(t1, UNIT(1)(dst)) ADDC(sum, t0) STORE(t2, UNIT(2)(dst)) ADDC(t2, t3) STORE(t3, UNIT(3)(dst)) ADDC(sum, t2) STORE(t4, UNIT(4)(dst)) ADDC(t4, t5) STORE(t5, UNIT(5)(dst)) ADDC(sum, t4) STORE(t6, UNIT(6)(dst)) ADDC(t6, t7) STORE(t7, UNIT(7)(dst)) ADDC(sum, t6) .set reorder /* DADDI_WAR */ ADD dst, dst, 8*NBYTES bgez len, 1b .set noreorder ADD len, 8*NBYTES # revert len (see above) /* * len == the number of bytes left to copy < 8*NBYTES */ .Lcleanup_both_aligned\@: #define rem t7 beqz len, .Ldone\@ sltu t0, len, 4*NBYTES bnez t0, .Lless_than_4units\@ and rem, len, (NBYTES-1) # rem = len % NBYTES /* * len >= 4*NBYTES */ LOAD(t0, UNIT(0)(src)) LOAD(t1, UNIT(1)(src)) LOAD(t2, UNIT(2)(src)) LOAD(t3, UNIT(3)(src)) SUB len, len, 4*NBYTES ADD src, src, 4*NBYTES STORE(t0, UNIT(0)(dst)) ADDC(t0, t1) STORE(t1, UNIT(1)(dst)) ADDC(sum, t0) STORE(t2, UNIT(2)(dst)) ADDC(t2, t3) STORE(t3, UNIT(3)(dst)) ADDC(sum, t2) .set reorder /* DADDI_WAR */ ADD dst, dst, 4*NBYTES beqz len, .Ldone\@ .set noreorder .Lless_than_4units\@: /* * rem = len % NBYTES */ beq rem, len, .Lcopy_bytes\@ nop 1: LOAD(t0, 0(src)) ADD src, src, NBYTES SUB len, len, NBYTES STORE(t0, 0(dst)) ADDC(sum, t0) .set reorder /* DADDI_WAR */ ADD dst, dst, NBYTES bne rem, len, 1b .set noreorder /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) * A loop would do only a byte at a time with possible branch * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE * because can't assume read-access to dst. Instead, use * STREST dst, which doesn't require read access to dst. * * This code should perform better than a simple loop on modern, * wide-issue mips processors because the code has fewer branches and * more instruction-level parallelism. 
*/ #define bits t2 beqz len, .Ldone\@ ADD t1, dst, len # t1 is just past last byte of dst li bits, 8*NBYTES SLL rem, len, 3 # rem = number of bits to keep LOAD(t0, 0(src)) SUB bits, bits, rem # bits = number of bits to discard SHIFT_DISCARD t0, t0, bits STREST(t0, -1(t1)) SHIFT_DISCARD_REVERT t0, t0, bits .set reorder ADDC(sum, t0) b .Ldone\@ .set noreorder .Ldst_unaligned\@: /* * dst is unaligned * t0 = src & ADDRMASK * t1 = dst & ADDRMASK; T1 > 0 * len >= NBYTES * * Copy enough bytes to align dst * Set match = (src and dst have same alignment) */ #define match rem LDFIRST(t3, FIRST(0)(src)) ADD t2, zero, NBYTES LDREST(t3, REST(0)(src)) SUB t2, t2, t1 # t2 = number of bytes copied xor match, t0, t1 STFIRST(t3, FIRST(0)(dst)) SLL t4, t1, 3 # t4 = number of bits to discard SHIFT_DISCARD t3, t3, t4 /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ ADDC(sum, t3) beq len, t2, .Ldone\@ SUB len, len, t2 ADD dst, dst, t2 beqz match, .Lboth_aligned\@ ADD src, src, t2 .Lsrc_unaligned_dst_aligned\@: SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter beqz t0, .Lcleanup_src_unaligned\@ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 1: /* * Avoid consecutive LD*'s to the same register since some mips * implementations can't issue them in the same cycle. * It's OK to load FIRST(N+1) before REST(N) because the two addresses * are to the same unit (unless src is aligned, but it's not). */ LDFIRST(t0, FIRST(0)(src)) LDFIRST(t1, FIRST(1)(src)) SUB len, len, 4*NBYTES LDREST(t0, REST(0)(src)) LDREST(t1, REST(1)(src)) LDFIRST(t2, FIRST(2)(src)) LDFIRST(t3, FIRST(3)(src)) LDREST(t2, REST(2)(src)) LDREST(t3, REST(3)(src)) ADD src, src, 4*NBYTES #ifdef CONFIG_CPU_SB1 nop # improves slotting #endif STORE(t0, UNIT(0)(dst)) ADDC(t0, t1) STORE(t1, UNIT(1)(dst)) ADDC(sum, t0) STORE(t2, UNIT(2)(dst)) ADDC(t2, t3) STORE(t3, UNIT(3)(dst)) ADDC(sum, t2) .set reorder /* DADDI_WAR */ ADD dst, dst, 4*NBYTES bne len, rem, 1b .set noreorder .Lcleanup_src_unaligned\@: beqz len, .Ldone\@ and rem, len, NBYTES-1 # rem = len % NBYTES beq rem, len, .Lcopy_bytes\@ nop 1: LDFIRST(t0, FIRST(0)(src)) LDREST(t0, REST(0)(src)) ADD src, src, NBYTES SUB len, len, NBYTES STORE(t0, 0(dst)) ADDC(sum, t0) .set reorder /* DADDI_WAR */ ADD dst, dst, NBYTES bne len, rem, 1b .set noreorder .Lcopy_bytes_checklen\@: beqz len, .Ldone\@ nop .Lcopy_bytes\@: /* 0 < len < NBYTES */ #ifdef CONFIG_CPU_LITTLE_ENDIAN #define SHIFT_START 0 #define SHIFT_INC 8 #else #define SHIFT_START 8*(NBYTES-1) #define SHIFT_INC -8 #endif move t2, zero # partial word li t3, SHIFT_START # shift #define COPY_BYTE(N) \ LOADBU(t0, N(src)); \ SUB len, len, 1; \ STOREB(t0, N(dst)); \ SLLV t0, t0, t3; \ addu t3, SHIFT_INC; \ beqz len, .Lcopy_bytes_done\@; \ or t2, t0 COPY_BYTE(0) COPY_BYTE(1) #ifdef USE_DOUBLE COPY_BYTE(2) COPY_BYTE(3) COPY_BYTE(4) COPY_BYTE(5) #endif LOADBU(t0, NBYTES-2(src)) SUB len, len, 1 STOREB(t0, NBYTES-2(dst)) SLLV t0, t0, t3 or t2, t0 .Lcopy_bytes_done\@: ADDC(sum, t2) .Ldone\@: /* fold checksum */ .set push .set noat #ifdef USE_DOUBLE dsll32 v1, sum, 0 daddu sum, v1 sltu v1, sum, v1 dsra32 sum, sum, 0 addu sum, v1 #endif #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \ defined(CONFIG_CPU_LOONGSON64) .set push .set arch=mips32r2 wsbh v1, sum movn sum, v1, odd .set pop #else beqz odd, 1f /* odd buffer alignment? 
*/ lui v1, 0x00ff addu v1, 0x00ff and t0, sum, v1 sll t0, t0, 8 srl sum, sum, 8 and sum, sum, v1 or sum, sum, t0 1: #endif .set pop .set reorder jr ra .set noreorder .endm .set noreorder .L_exc: jr ra li v0, 0 FEXPORT(__csum_partial_copy_nocheck) EXPORT_SYMBOL(__csum_partial_copy_nocheck) #ifndef CONFIG_EVA FEXPORT(__csum_partial_copy_to_user) EXPORT_SYMBOL(__csum_partial_copy_to_user) FEXPORT(__csum_partial_copy_from_user) EXPORT_SYMBOL(__csum_partial_copy_from_user) #endif __BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP #ifdef CONFIG_EVA LEAF(__csum_partial_copy_to_user) __BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP END(__csum_partial_copy_to_user) LEAF(__csum_partial_copy_from_user) __BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP END(__csum_partial_copy_from_user) #endif
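The ADDC()/fold sequences above implement the standard ones'-complement Internet checksum. A C sketch of the fold step, equivalent for short buffers; the real code also folds 64-bit sums and byte-swaps the result when the buffer started on an odd address:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit running sum to 16 bits with end-around carry,
 * then complement: the classic RFC 1071 finish. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum += sum >> 16;		/* absorb any remaining carry */
	return (uint16_t)~sum;
}

int main(void)
{
	const uint16_t words[] = { 0x4500, 0x0054, 0xabcd };
	uint32_t sum = 0;

	for (size_t i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum += words[i];	/* carries pile up above bit 15 */
	printf("checksum = 0x%04x\n", csum_fold(sum));
	return 0;
}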
aixcc-public/challenge-001-exemplar-source
4,407
arch/mips/kvm/msa.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * MIPS SIMD Architecture (MSA) context handling code for KVM. * * Copyright (C) 2015 Imagination Technologies Ltd. */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/regdef.h> .set noreorder .set noat LEAF(__kvm_save_msa) st_d 0, VCPU_FPR0, a0 st_d 1, VCPU_FPR1, a0 st_d 2, VCPU_FPR2, a0 st_d 3, VCPU_FPR3, a0 st_d 4, VCPU_FPR4, a0 st_d 5, VCPU_FPR5, a0 st_d 6, VCPU_FPR6, a0 st_d 7, VCPU_FPR7, a0 st_d 8, VCPU_FPR8, a0 st_d 9, VCPU_FPR9, a0 st_d 10, VCPU_FPR10, a0 st_d 11, VCPU_FPR11, a0 st_d 12, VCPU_FPR12, a0 st_d 13, VCPU_FPR13, a0 st_d 14, VCPU_FPR14, a0 st_d 15, VCPU_FPR15, a0 st_d 16, VCPU_FPR16, a0 st_d 17, VCPU_FPR17, a0 st_d 18, VCPU_FPR18, a0 st_d 19, VCPU_FPR19, a0 st_d 20, VCPU_FPR20, a0 st_d 21, VCPU_FPR21, a0 st_d 22, VCPU_FPR22, a0 st_d 23, VCPU_FPR23, a0 st_d 24, VCPU_FPR24, a0 st_d 25, VCPU_FPR25, a0 st_d 26, VCPU_FPR26, a0 st_d 27, VCPU_FPR27, a0 st_d 28, VCPU_FPR28, a0 st_d 29, VCPU_FPR29, a0 st_d 30, VCPU_FPR30, a0 st_d 31, VCPU_FPR31, a0 jr ra nop END(__kvm_save_msa) LEAF(__kvm_restore_msa) ld_d 0, VCPU_FPR0, a0 ld_d 1, VCPU_FPR1, a0 ld_d 2, VCPU_FPR2, a0 ld_d 3, VCPU_FPR3, a0 ld_d 4, VCPU_FPR4, a0 ld_d 5, VCPU_FPR5, a0 ld_d 6, VCPU_FPR6, a0 ld_d 7, VCPU_FPR7, a0 ld_d 8, VCPU_FPR8, a0 ld_d 9, VCPU_FPR9, a0 ld_d 10, VCPU_FPR10, a0 ld_d 11, VCPU_FPR11, a0 ld_d 12, VCPU_FPR12, a0 ld_d 13, VCPU_FPR13, a0 ld_d 14, VCPU_FPR14, a0 ld_d 15, VCPU_FPR15, a0 ld_d 16, VCPU_FPR16, a0 ld_d 17, VCPU_FPR17, a0 ld_d 18, VCPU_FPR18, a0 ld_d 19, VCPU_FPR19, a0 ld_d 20, VCPU_FPR20, a0 ld_d 21, VCPU_FPR21, a0 ld_d 22, VCPU_FPR22, a0 ld_d 23, VCPU_FPR23, a0 ld_d 24, VCPU_FPR24, a0 ld_d 25, VCPU_FPR25, a0 ld_d 26, VCPU_FPR26, a0 ld_d 27, VCPU_FPR27, a0 ld_d 28, VCPU_FPR28, a0 ld_d 29, VCPU_FPR29, a0 ld_d 30, VCPU_FPR30, a0 ld_d 31, VCPU_FPR31, a0 jr ra nop END(__kvm_restore_msa) .macro kvm_restore_msa_upper wr, off, base .set push .set noat #ifdef CONFIG_64BIT ld $1, \off(\base) insert_d \wr, 1 #elif defined(CONFIG_CPU_LITTLE_ENDIAN) lw $1, \off(\base) insert_w \wr, 2 lw $1, (\off+4)(\base) insert_w \wr, 3 #else /* CONFIG_CPU_BIG_ENDIAN */ lw $1, (\off+4)(\base) insert_w \wr, 2 lw $1, \off(\base) insert_w \wr, 3 #endif .set pop .endm LEAF(__kvm_restore_msa_upper) kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0 kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0 kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0 kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0 kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0 kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0 kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0 kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0 kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0 kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0 kvm_restore_msa_upper 10, VCPU_FPR10+8, a0 kvm_restore_msa_upper 11, VCPU_FPR11+8, a0 kvm_restore_msa_upper 12, VCPU_FPR12+8, a0 kvm_restore_msa_upper 13, VCPU_FPR13+8, a0 kvm_restore_msa_upper 14, VCPU_FPR14+8, a0 kvm_restore_msa_upper 15, VCPU_FPR15+8, a0 kvm_restore_msa_upper 16, VCPU_FPR16+8, a0 kvm_restore_msa_upper 17, VCPU_FPR17+8, a0 kvm_restore_msa_upper 18, VCPU_FPR18+8, a0 kvm_restore_msa_upper 19, VCPU_FPR19+8, a0 kvm_restore_msa_upper 20, VCPU_FPR20+8, a0 kvm_restore_msa_upper 21, VCPU_FPR21+8, a0 kvm_restore_msa_upper 22, VCPU_FPR22+8, a0 kvm_restore_msa_upper 23, VCPU_FPR23+8, a0 kvm_restore_msa_upper 24, VCPU_FPR24+8, a0 kvm_restore_msa_upper 25, VCPU_FPR25+8, a0 kvm_restore_msa_upper 26, 
VCPU_FPR26+8, a0 kvm_restore_msa_upper 27, VCPU_FPR27+8, a0 kvm_restore_msa_upper 28, VCPU_FPR28+8, a0 kvm_restore_msa_upper 29, VCPU_FPR29+8, a0 kvm_restore_msa_upper 30, VCPU_FPR30+8, a0 kvm_restore_msa_upper 31, VCPU_FPR31+8, a0 jr ra nop END(__kvm_restore_msa_upper) LEAF(__kvm_restore_msacsr) lw t0, VCPU_MSA_CSR(a0) /* * The ctcmsa must stay at this offset in __kvm_restore_msacsr. * See kvm_mips_csr_die_notify() which handles t0 containing a value * which triggers an MSA FP Exception, which must be stepped over and * ignored since the set cause bits must remain there for the guest. */ _ctcmsa MSA_CSR, t0 jr ra nop END(__kvm_restore_msacsr)
aixcc-public/challenge-001-exemplar-source
3,064
arch/mips/kvm/fpu.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * FPU context handling code for KVM. * * Copyright (C) 2015 Imagination Technologies Ltd. */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/regdef.h> /* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ #undef fp .set noreorder .set noat LEAF(__kvm_save_fpu) .set push SET_HARDFLOAT .set fp=64 mfc0 t0, CP0_STATUS sll t0, t0, 5 # is Status.FR set? bgez t0, 1f # no: skip odd doubles nop sdc1 $f1, VCPU_FPR1(a0) sdc1 $f3, VCPU_FPR3(a0) sdc1 $f5, VCPU_FPR5(a0) sdc1 $f7, VCPU_FPR7(a0) sdc1 $f9, VCPU_FPR9(a0) sdc1 $f11, VCPU_FPR11(a0) sdc1 $f13, VCPU_FPR13(a0) sdc1 $f15, VCPU_FPR15(a0) sdc1 $f17, VCPU_FPR17(a0) sdc1 $f19, VCPU_FPR19(a0) sdc1 $f21, VCPU_FPR21(a0) sdc1 $f23, VCPU_FPR23(a0) sdc1 $f25, VCPU_FPR25(a0) sdc1 $f27, VCPU_FPR27(a0) sdc1 $f29, VCPU_FPR29(a0) sdc1 $f31, VCPU_FPR31(a0) 1: sdc1 $f0, VCPU_FPR0(a0) sdc1 $f2, VCPU_FPR2(a0) sdc1 $f4, VCPU_FPR4(a0) sdc1 $f6, VCPU_FPR6(a0) sdc1 $f8, VCPU_FPR8(a0) sdc1 $f10, VCPU_FPR10(a0) sdc1 $f12, VCPU_FPR12(a0) sdc1 $f14, VCPU_FPR14(a0) sdc1 $f16, VCPU_FPR16(a0) sdc1 $f18, VCPU_FPR18(a0) sdc1 $f20, VCPU_FPR20(a0) sdc1 $f22, VCPU_FPR22(a0) sdc1 $f24, VCPU_FPR24(a0) sdc1 $f26, VCPU_FPR26(a0) sdc1 $f28, VCPU_FPR28(a0) jr ra sdc1 $f30, VCPU_FPR30(a0) .set pop END(__kvm_save_fpu) LEAF(__kvm_restore_fpu) .set push SET_HARDFLOAT .set fp=64 mfc0 t0, CP0_STATUS sll t0, t0, 5 # is Status.FR set? bgez t0, 1f # no: skip odd doubles nop ldc1 $f1, VCPU_FPR1(a0) ldc1 $f3, VCPU_FPR3(a0) ldc1 $f5, VCPU_FPR5(a0) ldc1 $f7, VCPU_FPR7(a0) ldc1 $f9, VCPU_FPR9(a0) ldc1 $f11, VCPU_FPR11(a0) ldc1 $f13, VCPU_FPR13(a0) ldc1 $f15, VCPU_FPR15(a0) ldc1 $f17, VCPU_FPR17(a0) ldc1 $f19, VCPU_FPR19(a0) ldc1 $f21, VCPU_FPR21(a0) ldc1 $f23, VCPU_FPR23(a0) ldc1 $f25, VCPU_FPR25(a0) ldc1 $f27, VCPU_FPR27(a0) ldc1 $f29, VCPU_FPR29(a0) ldc1 $f31, VCPU_FPR31(a0) 1: ldc1 $f0, VCPU_FPR0(a0) ldc1 $f2, VCPU_FPR2(a0) ldc1 $f4, VCPU_FPR4(a0) ldc1 $f6, VCPU_FPR6(a0) ldc1 $f8, VCPU_FPR8(a0) ldc1 $f10, VCPU_FPR10(a0) ldc1 $f12, VCPU_FPR12(a0) ldc1 $f14, VCPU_FPR14(a0) ldc1 $f16, VCPU_FPR16(a0) ldc1 $f18, VCPU_FPR18(a0) ldc1 $f20, VCPU_FPR20(a0) ldc1 $f22, VCPU_FPR22(a0) ldc1 $f24, VCPU_FPR24(a0) ldc1 $f26, VCPU_FPR26(a0) ldc1 $f28, VCPU_FPR28(a0) jr ra ldc1 $f30, VCPU_FPR30(a0) .set pop END(__kvm_restore_fpu) LEAF(__kvm_restore_fcsr) .set push SET_HARDFLOAT lw t0, VCPU_FCR31(a0) /* * The ctc1 must stay at this offset in __kvm_restore_fcsr. * See kvm_mips_csr_die_notify() which handles t0 containing a value * which triggers an FP Exception, which must be stepped over and * ignored since the set cause bits must remain there for the guest. */ ctc1 t0, fcr31 jr ra nop .set pop END(__kvm_restore_fcsr)
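Both save and restore paths above test Status.FR by shifting it into the sign bit: "sll t0, t0, 5" moves bit 26 to bit 31, and bgez then skips the odd doubles. The straight-line C equivalent of that test, with the register value faked:

#include <stdint.h>
#include <stdio.h>

#define ST0_FR (1u << 26)	/* Status.FR: 32 independent 64-bit FPRs */

int main(void)
{
	uint32_t status = ST0_FR;	/* pretend result of mfc0 CP0_STATUS */

	if (status & ST0_FR)
		printf("FR=1: odd-numbered doubles are real, save them\n");
	else
		printf("FR=0: odd doubles alias the even pairs, skip them\n");
	return 0;
}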
aixcc-public/challenge-001-exemplar-source
3,975
arch/mips/mm/cex-sb1.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2001,2002,2003 Broadcom Corporation */ #include <asm/asm.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/cacheops.h> #include <asm/sibyte/board.h> #define C0_ERRCTL $26 /* CP0: Error info */ #define C0_CERR_I $27 /* CP0: Icache error */ #define C0_CERR_D $27,1 /* CP0: Dcache error */ /* * Based on SiByte sample software cache-err/cerr.S * CVS revision 1.8. Only the 'unrecoverable' case * is changed. */ .set mips64 .set noreorder .set noat /* * sb1_cerr_vec: code to be copied to the Cache Error * Exception vector. The code must be pushed out to memory * (either by copying to Kseg0 and Kseg1 both, or by flushing * the L1 and L2) since it is fetched as 0xa0000100. * * NOTE: Be sure this handler is at most 28 instructions long * since the final 16 bytes of the exception vector memory * (0x170-0x17f) are used to preserve k0, k1, and ra. */ LEAF(except_vec2_sb1) /* * If this error is recoverable, we need to exit the handler * without having dirtied any registers. To do this, * save/restore k0 and k1 from low memory (Useg is direct * mapped while ERL=1). Note that we can't save to a * CPU-specific location without ruining a register in the * process. This means we are vulnerable to data corruption * whenever the handler is reentered by a second CPU. */ sd k0,0x170($0) sd k1,0x178($0) #ifdef CONFIG_SB1_CEX_ALWAYS_FATAL j handle_vec2_sb1 nop #else /* * M_ERRCTL_RECOVERABLE is bit 31, which makes it easy to tell * if we can fast-path out of here for a h/w-recovered error. */ mfc0 k1,C0_ERRCTL bgtz k1,attempt_recovery sll k0,k1,1 recovered_dcache: /* * Unlock CacheErr-D (which in turn unlocks CacheErr-DPA). * Ought to log the occurrence of this recovered dcache error. */ b recovered mtc0 $0,C0_CERR_D attempt_recovery: /* * k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31. Any * Dcache errors we can recover from will take more extensive * processing. For now, they are considered "unrecoverable". * Note that 'DC' becoming set (outside of ERL mode) will * cause 'IC' to clear; so if there's an Icache error, we'll * only find out about it if we recover from this error and * continue executing. */ bltz k0,unrecoverable sll k0,1 /* * k0 has C0_ERRCTL << 2, which puts 'IC' at bit 31. If an * Icache error isn't indicated, I'm not sure why we got here. * Consider that case "unrecoverable" for now. */ bgez k0,unrecoverable attempt_icache_recovery: /* * External icache errors are due to uncorrectable ECC errors * in the L2 cache or Memory Controller and cannot be * recovered here. */ mfc0 k0,C0_CERR_I /* delay slot */ li k1,1 << 26 /* ICACHE_EXTERNAL */ and k1,k0 bnez k1,unrecoverable andi k0,0x1fe0 /* * Since the error is internal, the 'IDX' field from * CacheErr-I is valid and we can just invalidate all blocks * in that set. */ cache Index_Invalidate_I,(0<<13)(k0) cache Index_Invalidate_I,(1<<13)(k0) cache Index_Invalidate_I,(2<<13)(k0) cache Index_Invalidate_I,(3<<13)(k0) /* Ought to log this recovered icache error */ recovered: /* Restore the saved registers */ ld k0,0x170($0) ld k1,0x178($0) eret unrecoverable: /* Unrecoverable Icache or Dcache error; log it and/or fail */ j handle_vec2_sb1 nop #endif END(except_vec2_sb1) LEAF(handle_vec2_sb1) mfc0 k0,CP0_CONFIG li k1,~CONF_CM_CMASK and k0,k0,k1 ori k0,k0,CONF_CM_UNCACHED mtc0 k0,CP0_CONFIG SSNOP SSNOP SSNOP SSNOP bnezl $0, 1f 1: mfc0 k0, CP0_STATUS sll k0, k0, 3 # check CU0 (kernel?) bltz k0, 2f nop /* Get a valid Kseg0 stack pointer. 
Any task's stack pointer * will do, although if we ever want to resume execution we * better not have corrupted any state. */ get_saved_sp move sp, k1 2: j sb1_cache_error nop END(handle_vec2_sb1)
aixcc-public/challenge-001-exemplar-source
1,452
arch/mips/mm/page-funcs.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Micro-assembler generated clear_page/copy_page functions. * * Copyright (C) 2012 MIPS Technologies, Inc. * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org> */ #include <asm/asm.h> #include <asm/export.h> #include <asm/regdef.h> #ifdef CONFIG_SIBYTE_DMA_PAGEOPS #define cpu_clear_page_function_name clear_page_cpu #define cpu_copy_page_function_name copy_page_cpu #else #define cpu_clear_page_function_name clear_page #define cpu_copy_page_function_name copy_page #endif /* * Maximum sizes: * * R4000 128 bytes S-cache: 0x058 bytes * R4600 v1.7: 0x05c bytes * R4600 v2.0: 0x060 bytes * With prefetching, 16 word strides 0x120 bytes */ EXPORT(__clear_page_start) LEAF(cpu_clear_page_function_name) EXPORT_SYMBOL(cpu_clear_page_function_name) 1: j 1b /* Dummy, will be replaced. */ .space 288 END(cpu_clear_page_function_name) EXPORT(__clear_page_end) /* * Maximum sizes: * * R4000 128 bytes S-cache: 0x11c bytes * R4600 v1.7: 0x080 bytes * R4600 v2.0: 0x07c bytes * With prefetching, 16 word strides 0x540 bytes */ EXPORT(__copy_page_start) LEAF(cpu_copy_page_function_name) EXPORT_SYMBOL(cpu_copy_page_function_name) 1: j 1b /* Dummy, will be replaced. */ .space 1344 END(cpu_copy_page_function_name) EXPORT(__copy_page_end)
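The dummy "j 1b" bodies and .space reservations above are overwritten at boot by the kernel's micro-assembler with a routine tuned to the running CPU. A loose C analogy for that install-at-boot pattern; names are illustrative, and the real mechanism patches instructions in place rather than swapping a pointer:

#include <stdio.h>

static void generic_clear_page(void *page) { (void)page; /* placeholder */ }
static void tuned_clear_page(void *page)   { (void)page; puts("prefetching variant"); }

/* stands in for the .space slot that uasm fills with generated code */
static void (*clear_page_fn)(void *) = generic_clear_page;

int main(void)
{
	clear_page_fn = tuned_clear_page;	/* done once during boot */
	clear_page_fn(NULL);
	return 0;
}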
aixcc-public/challenge-001-exemplar-source
1,501
arch/mips/mm/cex-oct.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2006 Cavium Networks * Cache error handler */ #include <asm/asm.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> /* * Handle cache error. Indicate to the second level handler whether * the exception is recoverable. */ LEAF(except_vec2_octeon) .set push .set mips64r2 .set noreorder .set noat /* due to an erratum we need to read the COP0 CacheErr (Dcache) * before any cache/DRAM access */ rdhwr k0, $0 /* get core_id */ PTR_LA k1, cache_err_dcache sll k0, k0, 3 PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */ dmfc0 k0, CP0_CACHEERR, 1 sd k0, (k1) dmtc0 $0, CP0_CACHEERR, 1 /* check whether this is a nested exception */ mfc0 k1, CP0_STATUS andi k1, k1, ST0_EXL beqz k1, 1f nop j cache_parity_error_octeon_non_recoverable nop /* exception is recoverable */ 1: j handle_cache_err nop .set pop END(except_vec2_octeon) /* We need to jump to handle_cache_err so that the previous handler * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */ LEAF(handle_cache_err) .set push .set noreorder .set noat SAVE_ALL KMODE jal cache_parity_error_octeon_recoverable nop j ret_from_exception nop .set pop END(handle_cache_err)
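The nested-exception test above keys off Status.EXL: a cache error taken while EXL is already set would have clobbered the saved exception state, so it cannot be recovered. The same decision in C, with the register value faked:

#include <stdint.h>
#include <stdio.h>

#define ST0_EXL (1u << 1)	/* Status.EXL: exception level already set */

int main(void)
{
	uint32_t status = 0;	/* pretend result of mfc0 CP0_STATUS */

	if (status & ST0_EXL)
		printf("nested exception: not recoverable\n");
	else
		printf("first-level exception: attempt recovery\n");
	return 0;
}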
aixcc-public/challenge-001-exemplar-source
1,085
arch/mips/boot/compressed/head.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994, 1995 Waldorf Electronics * Written by Ralf Baechle and Andreas Busse * Copyright (C) 1995 - 1999 Ralf Baechle * Copyright (C) 1996 Paul M. Antoine * Modified for DECStation and hence R3000 support by Paul M. Antoine * Further modifications by David S. Miller and Harald Koerfgen * Copyright (C) 1999 Silicon Graphics, Inc. */ #include <asm/asm.h> #include <asm/regdef.h> LEAF(start) /* Save boot rom start args */ move s0, a0 move s1, a1 move s2, a2 move s3, a3 /* Clear BSS */ PTR_LA a0, _edata PTR_LA a2, _end 1: sw zero, 0(a0) addiu a0, a0, 4 bne a2, a0, 1b PTR_LA a0, (.heap) /* heap address */ PTR_LA sp, (.stack + 8192) /* stack address */ PTR_LA t9, decompress_kernel jalr t9 2: move a0, s0 move a1, s1 move a2, s2 move a3, s3 PTR_LI t9, KERNEL_ENTRY jalr t9 3: b 3b END(start) .comm .heap,BOOT_HEAP_SIZE,4 .comm .stack,4096*2,4
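The "Clear BSS" loop above is the usual word-at-a-time zeroing between the _edata and _end linker symbols. A C rendering of the same loop, with the linker symbols faked as an array:

#include <stdio.h>

static unsigned int fake_bss[16] = { 1, 2, 3 };	/* pretend stale memory */

int main(void)
{
	unsigned int *p   = fake_bss;		/* _edata */
	unsigned int *end = fake_bss + 16;	/* _end */

	while (p != end)
		*p++ = 0;	/* sw zero, 0(a0); addiu a0, a0, 4 */
	printf("bss[0..2] = %u %u %u\n", fake_bss[0], fake_bss[1], fake_bss[2]);
	return 0;
}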
aixcc-public/challenge-001-exemplar-source
2,902
arch/mips/fw/lib/call_o32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * O32 interface for the 64 (or N32) ABI. * * Copyright (C) 2002, 2014 Maciej W. Rozycki */ #include <asm/asm.h> #include <asm/regdef.h> /* O32 register size. */ #define O32_SZREG 4 /* Maximum number of arguments supported. Must be even! */ #define O32_ARGC 32 /* Number of static registers we save. */ #define O32_STATC 11 /* Argument area frame size. */ #define O32_ARGSZ (O32_SZREG * O32_ARGC) /* Static register save area frame size. */ #define O32_STATSZ (SZREG * O32_STATC) /* Stack pointer register save area frame size. */ #define O32_SPSZ SZREG /* Combined area frame size. */ #define O32_FRAMESZ (O32_ARGSZ + O32_SPSZ + O32_STATSZ) /* Switched stack frame size. */ #define O32_NFRAMESZ (O32_ARGSZ + O32_SPSZ) .text /* * O32 function call dispatcher, for interfacing 32-bit ROM routines. * * The standard 64 (N32) calling sequence is supported, with a0 holding * a function pointer, a1 a pointer to the new stack to call the * function with or 0 if no stack switching is requested, a2-a7 -- the * function call's first six arguments, and the stack -- the remaining * arguments (up to O32_ARGC, including a2-a7). Static registers, gp * and fp are preserved, v0 holds the result. This code relies on the * called o32 function for sp and ra restoration and this dispatcher has * to be placed in a KSEGx (or KUSEG) address space. Any pointers * passed have to point to addresses within one of these spaces as well. */ NESTED(call_o32, O32_FRAMESZ, ra) REG_SUBU sp,O32_FRAMESZ REG_S ra,O32_FRAMESZ-1*SZREG(sp) REG_S fp,O32_FRAMESZ-2*SZREG(sp) REG_S gp,O32_FRAMESZ-3*SZREG(sp) REG_S s7,O32_FRAMESZ-4*SZREG(sp) REG_S s6,O32_FRAMESZ-5*SZREG(sp) REG_S s5,O32_FRAMESZ-6*SZREG(sp) REG_S s4,O32_FRAMESZ-7*SZREG(sp) REG_S s3,O32_FRAMESZ-8*SZREG(sp) REG_S s2,O32_FRAMESZ-9*SZREG(sp) REG_S s1,O32_FRAMESZ-10*SZREG(sp) REG_S s0,O32_FRAMESZ-11*SZREG(sp) move jp,a0 move fp,sp beqz a1,0f REG_SUBU fp,a1,O32_NFRAMESZ 0: REG_S sp,O32_NFRAMESZ-1*SZREG(fp) sll a0,a2,zero sll a1,a3,zero sll a2,a4,zero sll a3,a5,zero sw a6,4*O32_SZREG(fp) sw a7,5*O32_SZREG(fp) PTR_LA t0,O32_FRAMESZ(sp) PTR_LA t1,6*O32_SZREG(fp) li t2,O32_ARGC-6 1: lw t3,(t0) REG_ADDU t0,SZREG sw t3,(t1) REG_SUBU t2,1 REG_ADDU t1,O32_SZREG bnez t2,1b move sp,fp jalr jp REG_L sp,O32_NFRAMESZ-1*SZREG(sp) REG_L s0,O32_FRAMESZ-11*SZREG(sp) REG_L s1,O32_FRAMESZ-10*SZREG(sp) REG_L s2,O32_FRAMESZ-9*SZREG(sp) REG_L s3,O32_FRAMESZ-8*SZREG(sp) REG_L s4,O32_FRAMESZ-7*SZREG(sp) REG_L s5,O32_FRAMESZ-6*SZREG(sp) REG_L s6,O32_FRAMESZ-5*SZREG(sp) REG_L s7,O32_FRAMESZ-4*SZREG(sp) REG_L gp,O32_FRAMESZ-3*SZREG(sp) REG_L fp,O32_FRAMESZ-2*SZREG(sp) REG_L ra,O32_FRAMESZ-1*SZREG(sp) REG_ADDU sp,O32_FRAMESZ jr ra END(call_o32)
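The "sll a0, a2, zero" sequence in the dispatcher narrows each 64-bit argument register to the sign-extended 32-bit value an o32 callee expects (a 32-bit sll with shift amount 0 canonicalizes the low word). The same narrowing in C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t n64_arg = 0x123456789abcdef0;

	int32_t o32_arg = (int32_t)n64_arg;	/* keep the low 32 bits */

	printf("n64 0x%016llx -> o32 0x%08x\n",
	       (unsigned long long)n64_arg, (uint32_t)o32_arg);
	return 0;
}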
aixcc-public/challenge-001-exemplar-source
5,027
arch/mips/alchemy/common/sleeper.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2002 Embedded Edge, LLC * Author: dan@embeddededge.com * * Sleep helper for Au1xxx sleep mode. */ #include <asm/asm.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> .extern __flush_cache_all .text .set noreorder .set noat .align 5 /* preparatory stuff */ .macro SETUP_SLEEP subu sp, PT_SIZE sw $1, PT_R1(sp) sw $2, PT_R2(sp) sw $3, PT_R3(sp) sw $4, PT_R4(sp) sw $5, PT_R5(sp) sw $6, PT_R6(sp) sw $7, PT_R7(sp) sw $16, PT_R16(sp) sw $17, PT_R17(sp) sw $18, PT_R18(sp) sw $19, PT_R19(sp) sw $20, PT_R20(sp) sw $21, PT_R21(sp) sw $22, PT_R22(sp) sw $23, PT_R23(sp) sw $26, PT_R26(sp) sw $27, PT_R27(sp) sw $28, PT_R28(sp) sw $30, PT_R30(sp) sw $31, PT_R31(sp) mfc0 k0, CP0_STATUS sw k0, 0x20(sp) mfc0 k0, CP0_CONTEXT sw k0, 0x1c(sp) mfc0 k0, CP0_PAGEMASK sw k0, 0x18(sp) mfc0 k0, CP0_CONFIG sw k0, 0x14(sp) /* flush caches to make sure context is in memory */ la t1, __flush_cache_all lw t0, 0(t1) jalr t0 nop /* Now set up the scratch registers so the boot rom will * return to this point upon wakeup. * sys_scratch0 : SP * sys_scratch1 : RA */ lui t3, 0xb190 /* sys_xxx */ sw sp, 0x0018(t3) la k0, alchemy_sleep_wakeup /* resume path */ sw k0, 0x001c(t3) .endm .macro DO_SLEEP /* put power supply and processor to sleep */ sw zero, 0x0078(t3) /* sys_slppwr */ sync sw zero, 0x007c(t3) /* sys_sleep */ sync nop nop nop nop nop nop nop nop .endm /* sleep code for Au1000/Au1100/Au1500 memory controller type */ LEAF(alchemy_sleep_au1000) SETUP_SLEEP /* cache following instructions, as memory gets put to sleep */ la t0, 1f .set arch=r4000 cache 0x14, 0(t0) cache 0x14, 32(t0) cache 0x14, 64(t0) cache 0x14, 96(t0) .set mips0 1: lui a0, 0xb400 /* mem_xxx */ sw zero, 0x001c(a0) /* Precharge */ sync sw zero, 0x0020(a0) /* Auto Refresh */ sync sw zero, 0x0030(a0) /* Sleep */ sync DO_SLEEP END(alchemy_sleep_au1000) /* sleep code for Au1550/Au1200 memory controller type */ LEAF(alchemy_sleep_au1550) SETUP_SLEEP /* cache following instructions, as memory gets put to sleep */ la t0, 1f .set arch=r4000 cache 0x14, 0(t0) cache 0x14, 32(t0) cache 0x14, 64(t0) cache 0x14, 96(t0) .set mips0 1: lui a0, 0xb400 /* mem_xxx */ sw zero, 0x08c0(a0) /* Precharge */ sync sw zero, 0x08d0(a0) /* Self Refresh */ sync /* wait for sdram to enter self-refresh mode */ lui t0, 0x0100 2: lw t1, 0x0850(a0) /* mem_sdstat */ and t2, t1, t0 beq t2, zero, 2b nop /* disable SDRAM clocks */ lui t0, 0xcfff ori t0, t0, 0xffff lw t1, 0x0840(a0) /* mem_sdconfiga */ and t1, t0, t1 /* clear CE[1:0] */ sw t1, 0x0840(a0) /* mem_sdconfiga */ sync DO_SLEEP END(alchemy_sleep_au1550) /* sleepcode for Au1300 memory controller type */ LEAF(alchemy_sleep_au1300) SETUP_SLEEP /* cache following instructions, as memory gets put to sleep */ la t0, 2f la t1, 4f subu t2, t1, t0 .set arch=r4000 1: cache 0x14, 0(t0) subu t2, t2, 32 bgez t2, 1b addu t0, t0, 32 .set mips0 2: lui a0, 0xb400 /* mem_xxx */ /* disable all ports in mem_sdportcfga */ sw zero, 0x868(a0) /* mem_sdportcfga */ sync /* disable ODT */ li t0, 0x03010000 sw t0, 0x08d8(a0) /* mem_sdcmd0 */ sw t0, 0x08dc(a0) /* mem_sdcmd1 */ sync /* precharge */ li t0, 0x23000400 sw t0, 0x08dc(a0) /* mem_sdcmd1 */ sw t0, 0x08d8(a0) /* mem_sdcmd0 */ sync /* auto refresh */ sw zero, 0x08c8(a0) /* mem_sdautoref */ sync /* block access to the DDR */ lw t0, 0x0848(a0) /* mem_sdconfigb */ li t1, (1 << 7 | 0x3F) or t0, t0, t1 sw t0, 0x0848(a0) /* mem_sdconfigb */ sync /* issue the Self Refresh command */ li t0, 0x10000000 sw t0, 0x08dc(a0) /* 
mem_sdcmd1 */ sw t0, 0x08d8(a0) /* mem_sdcmd0 */ sync /* wait for sdram to enter self-refresh mode */ lui t0, 0x0300 3: lw t1, 0x0850(a0) /* mem_sdstat */ and t2, t1, t0 bne t2, t0, 3b nop /* disable SDRAM clocks */ li t0, ~(3<<28) lw t1, 0x0840(a0) /* mem_sdconfiga */ and t1, t1, t0 /* clear CE[1:0] */ sw t1, 0x0840(a0) /* mem_sdconfiga */ sync DO_SLEEP 4: END(alchemy_sleep_au1300) /* This is where we return upon wakeup. * Reload all of the registers and return. */ LEAF(alchemy_sleep_wakeup) lw k0, 0x20(sp) mtc0 k0, CP0_STATUS lw k0, 0x1c(sp) mtc0 k0, CP0_CONTEXT lw k0, 0x18(sp) mtc0 k0, CP0_PAGEMASK lw k0, 0x14(sp) mtc0 k0, CP0_CONFIG /* We need to catch the early Alchemy SOCs with * the write-only Config[OD] bit and set it back to one... */ jal au1x00_fixup_config_od nop lw $1, PT_R1(sp) lw $2, PT_R2(sp) lw $3, PT_R3(sp) lw $4, PT_R4(sp) lw $5, PT_R5(sp) lw $6, PT_R6(sp) lw $7, PT_R7(sp) lw $16, PT_R16(sp) lw $17, PT_R17(sp) lw $18, PT_R18(sp) lw $19, PT_R19(sp) lw $20, PT_R20(sp) lw $21, PT_R21(sp) lw $22, PT_R22(sp) lw $23, PT_R23(sp) lw $26, PT_R26(sp) lw $27, PT_R27(sp) lw $28, PT_R28(sp) lw $30, PT_R30(sp) lw $31, PT_R31(sp) jr ra addiu sp, PT_SIZE END(alchemy_sleep_wakeup)
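alchemy_sleep_au1550 and alchemy_sleep_au1300 both spin on mem_sdstat until the SDRAM controller reports self-refresh before gating its clocks. A C sketch of that poll loop; the register is faked with a variable, and the mask matches the au1550 variant's "lui t0, 0x0100" (bit 24):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	volatile uint32_t mem_sdstat = 0;	/* stand-in for 0x0850(a0) */
	const uint32_t mask = 0x0100u << 16;	/* bit 24, as in the asm */
	int polls = 0;

	do {
		polls++;
		mem_sdstat |= mask;	/* pretend the controller settles */
	} while ((mem_sdstat & mask) == 0);
	printf("self-refresh entered after %d poll(s)\n", polls);
	return 0;
}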
aixcc-public/challenge-001-exemplar-source
11,409
arch/powerpc/kexec/relocate_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains kexec low-level functions. * * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz * PPC44x port. Copyright (C) 2011, IBM Corporation * Author: Suzuki Poulose <suzuki@in.ibm.com> */ #include <asm/reg.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/ppc_asm.h> #include <asm/kexec.h> .text /* * Must be relocatable PIC code callable as a C function. */ .globl relocate_new_kernel relocate_new_kernel: /* r3 = page_list */ /* r4 = reboot_code_buffer */ /* r5 = start_address */ #ifdef CONFIG_PPC_85xx mr r29, r3 mr r30, r4 mr r31, r5 #define ENTRY_MAPPING_KEXEC_SETUP #include <kernel/85xx_entry_mapping.S> #undef ENTRY_MAPPING_KEXEC_SETUP mr r3, r29 mr r4, r30 mr r5, r31 li r0, 0 #elif defined(CONFIG_44x) /* Save our parameters */ mr r29, r3 mr r30, r4 mr r31, r5 #ifdef CONFIG_PPC_47x /* Check for 47x cores */ mfspr r3,SPRN_PVR srwi r3,r3,16 cmplwi cr0,r3,PVR_476FPE@h beq setup_map_47x cmplwi cr0,r3,PVR_476@h beq setup_map_47x cmplwi cr0,r3,PVR_476_ISS@h beq setup_map_47x #endif /* CONFIG_PPC_47x */ /* * Code for setting up 1:1 mapping for PPC440x for KEXEC * * We cannot switch off the MMU on PPC44x. * So we: * 1) Invalidate all the mappings except the one we are running from. * 2) Create a tmp mapping for our code in the other address space(TS) and * jump to it. Invalidate the entry we started in. * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS. * 4) Jump to the 1:1 mapping in original TS. * 5) Invalidate the tmp mapping. * * - Based on the kexec support code for FSL BookE * */ /* * Load the PID with kernel PID (0). * Also load our MSR_IS and TID to MMUCR for TLB search. */ li r3, 0 mtspr SPRN_PID, r3 mfmsr r4 andi. r4,r4,MSR_IS@l beq wmmucr oris r3,r3,PPC44x_MMUCR_STS@h wmmucr: mtspr SPRN_MMUCR,r3 sync /* * Invalidate all the TLB entries except the current entry * where we are running from */ bcl 20,31,$+4 /* Find our address */ 0: mflr r5 /* Make it accessible */ tlbsx r23,0,r5 /* Find entry we are in */ li r4,0 /* Start at TLB entry 0 */ li r3,0 /* Set PAGEID inval value */ 1: cmpw r23,r4 /* Is this our entry? */ beq skip /* If so, skip the inval */ tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ skip: addi r4,r4,1 /* Increment */ cmpwi r4,64 /* Are we done? */ bne 1b /* If not, repeat */ isync /* Create a temp mapping and jump to it */ andi. 
r6, r23, 1 /* Find the index to use */ addi r24, r6, 1 /* r24 will contain 1 or 2 */ mfmsr r9 /* get the MSR */ rlwinm r5, r9, 27, 31, 31 /* Extract the MSR[IS] */ xori r7, r5, 1 /* Use the other address space */ /* Read the current mapping entries */ tlbre r3, r23, PPC44x_TLB_PAGEID tlbre r4, r23, PPC44x_TLB_XLAT tlbre r5, r23, PPC44x_TLB_ATTRIB /* Save our current XLAT entry */ mr r25, r4 /* Extract the TLB PageSize */ li r10, 1 /* r10 will hold PageSize */ rlwinm r11, r3, 0, 24, 27 /* bits 24-27 */ /* XXX: As of now we use 256M, 4K pages */ cmpwi r11, PPC44x_TLB_256M bne tlb_4k rotlwi r10, r10, 28 /* r10 = 256M */ b write_out tlb_4k: cmpwi r11, PPC44x_TLB_4K bne default rotlwi r10, r10, 12 /* r10 = 4K */ b write_out default: rotlwi r10, r10, 10 /* r10 = 1K */ write_out: /* * Write out the tmp 1:1 mapping for this code in other address space * Fixup EPN = RPN , TS=other address space */ insrwi r3, r7, 1, 23 /* Bit 23 is TS for PAGEID field */ /* Write out the tmp mapping entries */ tlbwe r3, r24, PPC44x_TLB_PAGEID tlbwe r4, r24, PPC44x_TLB_XLAT tlbwe r5, r24, PPC44x_TLB_ATTRIB subi r11, r10, 1 /* PageOffset Mask = PageSize - 1 */ not r10, r11 /* Mask for PageNum */ /* Switch to other address space in MSR */ insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */ bcl 20,31,$+4 1: mflr r8 addi r8, r8, (2f-1b) /* Find the target offset */ /* Jump to the tmp mapping */ mtspr SPRN_SRR0, r8 mtspr SPRN_SRR1, r9 rfi 2: /* Invalidate the entry we were executing from */ li r3, 0 tlbwe r3, r23, PPC44x_TLB_PAGEID /* attribute fields. rwx for SUPERVISOR mode */ li r5, 0 ori r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) /* Create 1:1 mapping in 256M pages */ xori r7, r7, 1 /* Revert back to Original TS */ li r8, 0 /* PageNumber */ li r6, 3 /* TLB Index, start at 3 */ next_tlb: rotlwi r3, r8, 28 /* Create EPN (bits 0-3) */ mr r4, r3 /* RPN = EPN */ ori r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */ insrwi r3, r7, 1, 23 /* Set TS from r7 */ tlbwe r3, r6, PPC44x_TLB_PAGEID /* PageID field : EPN, V, SIZE */ tlbwe r4, r6, PPC44x_TLB_XLAT /* Address translation : RPN */ tlbwe r5, r6, PPC44x_TLB_ATTRIB /* Attributes */ addi r8, r8, 1 /* Increment PN */ addi r6, r6, 1 /* Increment TLB Index */ cmpwi r8, 8 /* Are we done ? */ bne next_tlb isync /* Jump to the new mapping 1:1 */ li r9,0 insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */ bcl 20,31,$+4 1: mflr r8 and r8, r8, r11 /* Get our offset within page */ addi r8, r8, (2f-1b) and r5, r25, r10 /* Get our target PageNum */ or r8, r8, r5 /* Target jump address */ mtspr SPRN_SRR0, r8 mtspr SPRN_SRR1, r9 rfi 2: /* Invalidate the tmp entry we used */ li r3, 0 tlbwe r3, r24, PPC44x_TLB_PAGEID sync b ppc44x_map_done #ifdef CONFIG_PPC_47x /* 1:1 mapping for 47x */ setup_map_47x: /* * Load the kernel pid (0) to PID and also to MMUCR[TID]. * Also set the MSR IS->MMUCR STS */ li r3, 0 mtspr SPRN_PID, r3 /* Set PID */ mfmsr r4 /* Get MSR */ andi. r4, r4, MSR_IS@l /* TS=1? */ beq 1f /* If not, leave STS=0 */ oris r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */ 1: mtspr SPRN_MMUCR, r3 /* Put MMUCR */ sync /* Find the entry we are running from */ bcl 20,31,$+4 2: mflr r23 tlbsx r23, 0, r23 tlbre r24, r23, 0 /* TLB Word 0 */ tlbre r25, r23, 1 /* TLB Word 1 */ tlbre r26, r23, 2 /* TLB Word 2 */ /* * Invalidates all the tlb entries by writing to 256 RPNs(r4) * of 4k page size in all 4 ways (0-3 in r3). * This would invalidate the entire UTLB including the one we are * running from. 
However the shadow TLB entries would help us * to continue the execution, until we flush them (rfi/isync). */ addis r3, 0, 0x8000 /* specify the way */ addi r4, 0, 0 /* TLB Word0 = (EPN=0, VALID = 0) */ addi r5, 0, 0 b clear_utlb_entry /* Align the loop to speed things up. from head_44x.S */ .align 6 clear_utlb_entry: tlbwe r4, r3, 0 tlbwe r5, r3, 1 tlbwe r5, r3, 2 addis r3, r3, 0x2000 /* Increment the way */ cmpwi r3, 0 bne clear_utlb_entry addis r3, 0, 0x8000 addis r4, r4, 0x100 /* Increment the EPN */ cmpwi r4, 0 bne clear_utlb_entry /* Create the entries in the other address space */ mfmsr r5 rlwinm r7, r5, 27, 31, 31 /* Get the TS (Bit 26) from MSR */ xori r7, r7, 1 /* r7 = !TS */ insrwi r24, r7, 1, 21 /* Change the TS in the saved TLB word 0 */ /* * write out the TLB entries for the tmp mapping * Use way '0' so that we could easily invalidate it later. */ lis r3, 0x8000 /* Way '0' */ tlbwe r24, r3, 0 tlbwe r25, r3, 1 tlbwe r26, r3, 2 /* Update the msr to the new TS */ insrwi r5, r7, 1, 26 bcl 20,31,$+4 1: mflr r6 addi r6, r6, (2f-1b) mtspr SPRN_SRR0, r6 mtspr SPRN_SRR1, r5 rfi /* * Now we are in the tmp address space. * Create a 1:1 mapping for 0-2GiB in the original TS. */ 2: li r3, 0 li r4, 0 /* TLB Word 0 */ li r5, 0 /* TLB Word 1 */ li r6, 0 ori r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */ li r8, 0 /* PageIndex */ xori r7, r7, 1 /* revert back to original TS */ write_utlb: rotlwi r5, r8, 28 /* RPN = PageIndex * 256M */ /* ERPN = 0 as we don't use memory above 2G */ mr r4, r5 /* EPN = RPN */ ori r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M) insrwi r4, r7, 1, 21 /* Insert the TS to Word 0 */ tlbwe r4, r3, 0 /* Write out the entries */ tlbwe r5, r3, 1 tlbwe r6, r3, 2 addi r8, r8, 1 cmpwi r8, 8 /* Have we completed ? */ bne write_utlb /* make sure we complete the TLB write up */ isync /* * Prepare to jump to the 1:1 mapping. * 1) Extract page size of the tmp mapping * DSIZ = TLB_Word0[22:27] * 2) Calculate the physical address of the address * to jump to. */ rlwinm r10, r24, 0, 22, 27 cmpwi r10, PPC47x_TLB0_4K bne 0f li r10, 0x1000 /* r10 = 4k */ bl 1f 0: /* Defaults to 256M */ lis r10, 0x1000 bcl 20,31,$+4 1: mflr r4 addi r4, r4, (2f-1b) /* virtual address of 2f */ subi r11, r10, 1 /* offsetmask = Pagesize - 1 */ not r10, r11 /* Pagemask = ~(offsetmask) */ and r5, r25, r10 /* Physical page */ and r6, r4, r11 /* offset within the current page */ or r5, r5, r6 /* Physical address for 2f */ /* Switch the TS in MSR to the original one */ mfmsr r8 insrwi r8, r7, 1, 26 mtspr SPRN_SRR1, r8 mtspr SPRN_SRR0, r5 rfi 2: /* Invalidate the tmp mapping */ lis r3, 0x8000 /* Way '0' */ clrrwi r24, r24, 12 /* Clear the valid bit */ tlbwe r24, r3, 0 tlbwe r25, r3, 1 tlbwe r26, r3, 2 /* Make sure we complete the TLB write and flush the shadow TLB */ isync #endif ppc44x_map_done: /* Restore the parameters */ mr r3, r29 mr r4, r30 mr r5, r31 li r0, 0 #else li r0, 0 /* * Set Machine Status Register to a known status, * switch the MMU off and jump to 1: in a single step. */ mr r8, r0 ori r8, r8, MSR_RI|MSR_ME mtspr SPRN_SRR1, r8 addi r8, r4, 1f - relocate_new_kernel mtspr SPRN_SRR0, r8 sync rfi 1: #endif /* from this point address translation is turned off */ /* and interrupts are disabled */ /* set a new stack at the bottom of our page... 
*/ /* (not really needed now) */ addi r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */ stw r0, 0(r1) /* Do the copies */ li r6, 0 /* checksum */ mr r0, r3 b 1f 0: /* top, read another word for the indirection page */ lwzu r0, 4(r3) 1: /* is it a destination page? (r8) */ rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */ beq 2f rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */ b 0b 2: /* is it an indirection page? (r3) */ rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */ beq 2f rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */ subi r3, r3, 4 b 0b 2: /* are we done? */ rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */ beq 2f b 3f 2: /* is it a source page? (r9) */ rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */ beq 0b rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */ li r7, PAGE_SIZE / 4 mtctr r7 subi r9, r9, 4 subi r8, r8, 4 9: lwzu r0, 4(r9) /* do the copy */ xor r6, r6, r0 stwu r0, 4(r8) dcbst 0, r8 sync icbi 0, r8 bdnz 9b addi r9, r9, 4 addi r8, r8, 4 b 0b 3: /* To be certain of avoiding problems with self-modifying code * execute a serializing instruction here. */ isync sync mfspr r3, SPRN_PIR /* current core we are running on */ mr r4, r5 /* load physical address of chunk called */ /* jump to the entry point, usually the setup routine */ mtlr r5 blrl 1: b 1b relocate_new_kernel_end: .globl relocate_new_kernel_size relocate_new_kernel_size: .long relocate_new_kernel_end - relocate_new_kernel
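The rlwinm./beq ladder in the copy loop decodes kexec indirection-page entries: the low bits of each word are flags, the rest is a page-aligned address. A C sketch of the same decode; the IND_* values match the flag bits tested above, and the entry value is made up:

#include <stdio.h>

#define IND_DESTINATION 0x1UL	/* set the copy destination */
#define IND_INDIRECTION 0x2UL	/* chain to another entry page */
#define IND_DONE        0x4UL	/* stop processing */
#define IND_SOURCE      0x8UL	/* copy one page from here */
#define PAGE_MASK       (~0xfffUL)	/* 4 KiB pages, as in rlwinm ...,0,19 */

int main(void)
{
	unsigned long entry = 0x2000 | IND_SOURCE;

	if (entry & IND_DONE)
		printf("done\n");
	else if (entry & IND_DESTINATION)
		printf("destination page at 0x%lx\n", entry & PAGE_MASK);
	else if (entry & IND_INDIRECTION)
		printf("next indirection page at 0x%lx\n", entry & PAGE_MASK);
	else if (entry & IND_SOURCE)
		printf("source page at 0x%lx\n", entry & PAGE_MASK);
	return 0;
}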
aixcc-public/challenge-001-exemplar-source
6,972
arch/powerpc/kernel/cpu_setup_e500.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains low level CPU setup functions. * Kumar Gala <galak@kernel.crashing.org> * Copyright 2009 Freescale Semiconductor, Inc. * * Based on cpu_setup_6xx code by * Benjamin Herrenschmidt <benh@kernel.crashing.org> */ #include <asm/page.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/ppc_asm.h> #include <asm/nohash/mmu-e500.h> #include <asm/asm-offsets.h> #include <asm/mpc85xx.h> _GLOBAL(__e500_icache_setup) mfspr r0, SPRN_L1CSR1 andi. r3, r0, L1CSR1_ICE bnelr /* Already enabled */ oris r0, r0, L1CSR1_CPE@h ori r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE) mtspr SPRN_L1CSR1, r0 /* Enable I-Cache */ isync blr _GLOBAL(__e500_dcache_setup) mfspr r0, SPRN_L1CSR0 andi. r3, r0, L1CSR0_DCE bnelr /* Already enabled */ msync isync li r0, 0 mtspr SPRN_L1CSR0, r0 /* Disable */ msync isync li r0, (L1CSR0_DCFI | L1CSR0_CLFC) mtspr SPRN_L1CSR0, r0 /* Invalidate */ isync 1: mfspr r0, SPRN_L1CSR0 andi. r3, r0, L1CSR0_CLFC bne+ 1b /* Wait for lock bits reset */ oris r0, r0, L1CSR0_CPE@h ori r0, r0, L1CSR0_DCE msync isync mtspr SPRN_L1CSR0, r0 /* Enable */ isync blr /* * FIXME - we haven't yet done testing to determine a reasonable default * value for PW20_WAIT_IDLE_BIT. */ #define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ _GLOBAL(setup_pw20_idle) mfspr r3, SPRN_PWRMGTCR0 /* Set PW20_WAIT bit, enable pw20 state*/ ori r3, r3, PWRMGTCR0_PW20_WAIT li r11, PW20_WAIT_IDLE_BIT /* Set Automatic PW20 Core Idle Count */ rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT mtspr SPRN_PWRMGTCR0, r3 blr /* * FIXME - we haven't yet done testing to determine a reasonable default * value for AV_WAIT_IDLE_BIT. */ #define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ _GLOBAL(setup_altivec_idle) mfspr r3, SPRN_PWRMGTCR0 /* Enable Altivec Idle */ oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h li r11, AV_WAIT_IDLE_BIT /* Set Automatic AltiVec Idle Count */ rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT mtspr SPRN_PWRMGTCR0, r3 blr #ifdef CONFIG_PPC_E500MC _GLOBAL(__setup_cpu_e6500) mflr r6 #ifdef CONFIG_PPC64 bl setup_altivec_ivors /* Touch IVOR42 only if the CPU supports E.HV category */ mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f bl setup_lrat_ivor 1: #endif bl setup_pw20_idle bl setup_altivec_idle bl __setup_cpu_e5500 mtlr r6 blr #endif /* CONFIG_PPC_E500MC */ #ifdef CONFIG_PPC32 #ifdef CONFIG_PPC_E500 #ifndef CONFIG_PPC_E500MC _GLOBAL(__setup_cpu_e500v1) _GLOBAL(__setup_cpu_e500v2) mflr r4 bl __e500_icache_setup bl __e500_dcache_setup bl __setup_e500_ivors #if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI) /* Ensure that RFXE is set */ mfspr r3,SPRN_HID1 oris r3,r3,HID1_RFXE@h mtspr SPRN_HID1,r3 #endif mtlr r4 blr #else /* CONFIG_PPC_E500MC */ _GLOBAL(__setup_cpu_e500mc) _GLOBAL(__setup_cpu_e5500) mflr r5 bl __e500_icache_setup bl __e500_dcache_setup bl __setup_e500mc_ivors /* * We only want to touch IVOR38-41 if we're running on hardware * that supports category E.HV. The architectural way to determine * this is MMUCFG[LPIDSIZE]. */ mfspr r3, SPRN_MMUCFG rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE beq 1f bl __setup_ehv_ivors b 2f 1: lwz r3, CPU_SPEC_FEATURES(r4) /* We need this check as cpu_setup is also called for * the secondary cores. So, if we have already cleared * the feature on the primary core, avoid doing it on the * secondary core. */ andi. 
r6, r3, CPU_FTR_EMB_HV beq 2f rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV stw r3, CPU_SPEC_FEATURES(r4) 2: mtlr r5 blr #endif /* CONFIG_PPC_E500MC */ #endif /* CONFIG_PPC_E500 */ #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC_BOOK3E_64 _GLOBAL(__restore_cpu_e6500) mflr r5 bl setup_altivec_ivors /* Touch IVOR42 only if the CPU supports E.HV category */ mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f bl setup_lrat_ivor 1: bl setup_pw20_idle bl setup_altivec_idle bl __restore_cpu_e5500 mtlr r5 blr _GLOBAL(__restore_cpu_e5500) mflr r4 bl __e500_icache_setup bl __e500_dcache_setup bl __setup_base_ivors bl setup_perfmon_ivor bl setup_doorbell_ivors /* * We only want to touch IVOR38-41 if we're running on hardware * that supports category E.HV. The architectural way to determine * this is MMUCFG[LPIDSIZE]. */ mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f bl setup_ehv_ivors 1: mtlr r4 blr _GLOBAL(__setup_cpu_e5500) mflr r5 bl __e500_icache_setup bl __e500_dcache_setup bl __setup_base_ivors bl setup_perfmon_ivor bl setup_doorbell_ivors /* * We only want to touch IVOR38-41 if we're running on hardware * that supports category E.HV. The architectural way to determine * this is MMUCFG[LPIDSIZE]. */ mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f bl setup_ehv_ivors b 2f 1: ld r10,CPU_SPEC_FEATURES(r4) LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV) andc r10,r10,r9 std r10,CPU_SPEC_FEATURES(r4) 2: mtlr r5 blr #endif /* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */ _GLOBAL(flush_dcache_L1) mfmsr r10 wrteei 0 mfspr r3,SPRN_L1CFG0 rlwinm r5,r3,9,3 /* Extract cache block size */ twlgti r5,1 /* Only 32 and 64 byte cache blocks * are currently defined. */ li r4,32 subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) - * log2(number of ways) */ slw r5,r4,r5 /* r5 = cache block size */ rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */ mulli r7,r7,13 /* An 8-way cache will require 13 * loads per set. */ slw r7,r7,r6 /* save off HID0 and set DCFA */ mfspr r8,SPRN_HID0 ori r9,r8,HID0_DCFA@l mtspr SPRN_HID0,r9 isync LOAD_REG_IMMEDIATE(r6, KERNELBASE) mr r4, r6 mtctr r7 1: lwz r3,0(r4) /* Load... */ add r4,r4,r5 bdnz 1b msync mr r4, r6 mtctr r7 1: dcbf 0,r4 /* ...and flush. */ add r4,r4,r5 bdnz 1b /* restore HID0 */ mtspr SPRN_HID0,r8 isync wrtee r10 blr has_L2_cache: /* skip L2 cache on P2040/P2040E as they have no L2 cache */ mfspr r3, SPRN_SVR /* shift right by 8 bits and clear E bit of SVR */ rlwinm r4, r3, 24, ~0x800 lis r3, SVR_P2040@h ori r3, r3, SVR_P2040@l cmpw r4, r3 beq 1f li r3, 1 blr 1: li r3, 0 blr /* flush backside L2 cache */ flush_backside_L2_cache: mflr r10 bl has_L2_cache mtlr r10 cmpwi r3, 0 beq 2f /* Flush the L2 cache */ mfspr r3, SPRN_L2CSR0 ori r3, r3, L2CSR0_L2FL@l msync isync mtspr SPRN_L2CSR0,r3 isync /* check if it is complete */ 1: mfspr r3,SPRN_L2CSR0 andi. r3, r3, L2CSR0_L2FL@l bne 1b 2: blr _GLOBAL(cpu_down_flush_e500v2) mflr r0 bl flush_dcache_L1 mtlr r0 blr _GLOBAL(cpu_down_flush_e500mc) _GLOBAL(cpu_down_flush_e5500) mflr r0 bl flush_dcache_L1 bl flush_backside_L2_cache mtlr r0 blr /* L1 Data Cache of e6500 contains no modified data, no flush is required */ _GLOBAL(cpu_down_flush_e6500) blr
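flush_dcache_L1 above sizes its load and dcbf loops from SPRN_L1CFG0: 13 loads per set are needed to displace all eight ways, times the number of sets. A hypothetical C model of that computation; the field extraction follows the rlwinm sequence in the asm and the 8-way assumption comes from its comment:

#include <stdint.h>

static unsigned long l1_flush_loop_count(uint32_t l1cfg0)
{
        unsigned long block_size = 32u << ((l1cfg0 >> 23) & 0x3); /* cache block size */
        unsigned long size_kib   = l1cfg0 & 0xff;                 /* cache size in KiB */
        unsigned long ways       = 8;                             /* assumed, per the asm comment */
        unsigned long sets       = (size_kib * 1024) / (block_size * ways);

        return sets * 13;       /* 13 loads per set displace all 8 ways */
}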
aixcc-public/challenge-001-exemplar-source
8,714
arch/powerpc/kernel/entry_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> * Adapted for Power Macintosh by Paul Mackerras. * Low-level exception handlers and MMU support * rewritten by Paul Mackerras. * Copyright (C) 1996 Paul Mackerras. * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). * * This file contains the system call entry code, context switch * code, and exception/interrupt return code for PowerPC. */ #include <linux/errno.h> #include <linux/err.h> #include <asm/cache.h> #include <asm/unistd.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/thread_info.h> #include <asm/code-patching-asm.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/cputable.h> #include <asm/firmware.h> #include <asm/bug.h> #include <asm/ptrace.h> #include <asm/irqflags.h> #include <asm/hw_irq.h> #include <asm/context_tracking.h> #include <asm/ppc-opcode.h> #include <asm/barrier.h> #include <asm/export.h> #include <asm/asm-compat.h> #ifdef CONFIG_PPC_BOOK3S #include <asm/exception-64s.h> #else #include <asm/exception-64e.h> #endif #include <asm/feature-fixups.h> #include <asm/kup.h> /* * System calls. */ .section ".text" #ifdef CONFIG_PPC_BOOK3S_64 #define FLUSH_COUNT_CACHE \ 1: nop; \ patch_site 1b, patch__call_flush_branch_caches1; \ 1: nop; \ patch_site 1b, patch__call_flush_branch_caches2; \ 1: nop; \ patch_site 1b, patch__call_flush_branch_caches3 .macro nops number .rept \number nop .endr .endm .balign 32 .global flush_branch_caches flush_branch_caches: /* Save LR into r9 */ mflr r9 // Flush the link stack .rept 64 bl .+4 .endr b 1f nops 6 .balign 32 /* Restore LR */ 1: mtlr r9 // If we're just flushing the link stack, return here 3: nop patch_site 3b patch__flush_link_stack_return li r9,0x7fff mtctr r9 PPC_BCCTR_FLUSH 2: nop patch_site 2b patch__flush_count_cache_return nops 3 .rept 278 .balign 32 PPC_BCCTR_FLUSH nops 7 .endr blr #else #define FLUSH_COUNT_CACHE #endif /* CONFIG_PPC_BOOK3S_64 */ /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. Then the state * of the other is restored from its kernel stack. The memory * management hardware is updated to the second process's state. * Finally, we can return to the second process, via interrupt_return. * On entry, r3 points to the THREAD for the current task, r4 * points to the THREAD for the new task. * * Note: there are two ways to get to the "going out" portion * of this code; either by coming in via the entry (_switch) * or via "fork" which must set up an environment equivalent * to the "_switch" path. If you change this you'll have to change * the fork code also. * * The code which creates the new task context is in 'copy_thread' * in arch/powerpc/kernel/process.c */ .align 7 _GLOBAL(_switch) mflr r0 std r0,16(r1) stdu r1,-SWITCH_FRAME_SIZE(r1) /* r3-r13 are caller saved -- Cort */ SAVE_NVGPRS(r1) std r0,_NIP(r1) /* Return to switch caller */ mfcr r23 std r23,_CCR(r1) std r1,KSP(r3) /* Set old stack pointer */ kuap_check_amr r9, r10 FLUSH_COUNT_CACHE /* Clobbers r9, ctr */ /* * On SMP kernels, care must be taken because a task may be * scheduled off CPUx and on to CPUy. Memory ordering must be * considered. 
* * Cacheable stores on CPUx will be visible when the task is * scheduled on CPUy by virtue of the core scheduler barriers * (see "Notes on Program-Order guarantees on SMP systems." in * kernel/sched/core.c). * * Uncacheable stores in the case of involuntary preemption must * be taken care of. The smp_mb__after_spinlock() in __schedule() * is implemented as hwsync on powerpc, which orders MMIO too. So * long as there is an hwsync in the context switch path, it will * be executed on the source CPU after the task has performed * all MMIO ops on that CPU, and on the destination CPU before the * task performs any MMIO ops there. */ /* * The kernel context switch path must contain a spin_lock, * which contains larx/stcx, which will clear any reservation * of the task being switched. */ #ifdef CONFIG_PPC_BOOK3S /* Cancel all explict user streams as they will have no use after context * switch and will stop the HW from creating streams itself */ DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6) #endif addi r6,r4,-THREAD /* Convert THREAD to 'current' */ std r6,PACACURRENT(r13) /* Set new 'current' */ #if defined(CONFIG_STACKPROTECTOR) ld r6, TASK_CANARY(r6) std r6, PACA_CANARY(r13) #endif ld r8,KSP(r4) /* new stack pointer */ #ifdef CONFIG_PPC_64S_HASH_MMU BEGIN_MMU_FTR_SECTION b 2f END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) BEGIN_FTR_SECTION clrrdi r6,r8,28 /* get its ESID */ clrrdi r9,r1,28 /* get current sp ESID */ FTR_SECTION_ELSE clrrdi r6,r8,40 /* get its 1T ESID */ clrrdi r9,r1,40 /* get current sp 1T ESID */ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT) clrldi. r0,r6,2 /* is new ESID c00000000? */ cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ cror eq,4*cr1+eq,eq beq 2f /* if yes, don't slbie it */ /* Bolt in the new stack SLB entry */ ld r7,KSP_VSID(r4) /* Get new stack's VSID */ oris r0,r6,(SLB_ESID_V)@h ori r0,r0,(SLB_NUM_BOLTED-1)@l BEGIN_FTR_SECTION li r9,MMU_SEGSIZE_1T /* insert B field */ oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* Update the last bolted SLB. No write barriers are needed * here, provided we only update the current CPU's SLB shadow * buffer. */ ld r9,PACA_SLBSHADOWPTR(r13) li r12,0 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */ li r12,SLBSHADOW_STACKVSID STDX_BE r7,r12,r9 /* Save VSID */ li r12,SLBSHADOW_STACKESID STDX_BE r0,r12,r9 /* Save ESID */ /* No need to check for MMU_FTR_NO_SLBIE_B here, since when * we have 1TB segments, the only CPUs known to have the errata * only support less than 1TB of system memory and we'll never * actually hit this code path. */ isync slbie r6 BEGIN_FTR_SECTION slbie r6 /* Workaround POWER5 < DD2.1 issue */ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) slbmte r7,r0 isync 2: #endif /* CONFIG_PPC_64S_HASH_MMU */ clrrdi r7, r8, THREAD_SHIFT /* base of new stack */ /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE because we don't need to leave the 288-byte ABI gap at the top of the kernel stack. */ addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE /* * PMU interrupts in radix may come in here. They will use r1, not * PACAKSAVE, so this stack switch will not cause a problem. They * will store to the process stack, which may then be migrated to * another CPU. However the rq lock release on this CPU paired with * the rq lock acquire on the new CPU before the stack becomes * active on the new CPU, will order those stores. 
*/ mr r1,r8 /* start using new stack pointer */ std r7,PACAKSAVE(r13) ld r6,_CCR(r1) mtcrf 0xFF,r6 /* r3-r13 are destroyed -- Cort */ REST_NVGPRS(r1) /* convert old thread to its task_struct for return value */ addi r3,r3,-THREAD ld r7,_NIP(r1) /* Return to _switch caller in new task */ mtlr r7 addi r1,r1,SWITCH_FRAME_SIZE blr _GLOBAL(enter_prom) mflr r0 std r0,16(r1) stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */ /* Because PROM is running in 32b mode, it clobbers the high order half * of all registers that it saves. We therefore save those registers * PROM might touch to the stack. (r0, r3-r13 are caller saved) */ SAVE_GPR(2, r1) SAVE_GPR(13, r1) SAVE_NVGPRS(r1) mfcr r10 mfmsr r11 std r10,_CCR(r1) std r11,_MSR(r1) /* Put PROM address in SRR0 */ mtsrr0 r4 /* Setup our trampoline return addr in LR */ bcl 20,31,$+4 0: mflr r4 addi r4,r4,(1f - 0b) mtlr r4 /* Prepare a 32-bit mode big endian MSR */ #ifdef CONFIG_PPC_BOOK3E_64 rlwinm r11,r11,0,1,31 mtsrr1 r11 rfi #else /* CONFIG_PPC_BOOK3E_64 */ LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE) andc r11,r11,r12 mtsrr1 r11 RFI_TO_KERNEL #endif /* CONFIG_PPC_BOOK3E_64 */ 1: /* Return from OF */ FIXUP_ENDIAN /* Just make sure that r1 top 32 bits didn't get * corrupt by OF */ rldicl r1,r1,0,32 /* Restore the MSR (back to 64 bits) */ ld r0,_MSR(r1) MTMSRD(r0) isync /* Restore other registers */ REST_GPR(2, r1) REST_GPR(13, r1) REST_NVGPRS(r1) ld r4,_CCR(r1) mtcr r4 addi r1,r1,SWITCH_FRAME_SIZE ld r0,16(r1) mtlr r0 blr
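_switch hands over by saving the non-volatile state on the old kernel stack, storing the old r1 in prev->thread.ksp, then recovering the task_struct from the new THREAD pointer with 'addi r6,r4,-THREAD'. In C that address arithmetic is just an offsetof subtraction; a sketch with stand-in struct layouts:

#include <stddef.h>

struct thread_struct { unsigned long ksp; };               /* stand-in layout */
struct task_struct   { long state; struct thread_struct thread; };

/* Mirrors 'addi rX,rY,-THREAD': THREAD is the offset of thread_struct
 * inside task_struct, so subtracting it recovers the task pointer. */
static struct task_struct *thread_to_task(struct thread_struct *t)
{
        return (struct task_struct *)((char *)t - offsetof(struct task_struct, thread));
}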
aixcc-public/challenge-001-exemplar-source
10,505
arch/powerpc/kernel/l2cr_6xx.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* L2CR functions Copyright © 1997-1998 by PowerLogix R & D, Inc. */ /* Thur, Dec. 12, 1998. - First public release, contributed by PowerLogix. *********** Sat, Aug. 7, 1999. - Terry: Made sure code disabled interrupts before running. (Previously it was assumed interrupts were already disabled). - Terry: Updated for tentative G4 support. 4MB of memory is now flushed instead of 2MB. (Prob. only 3 is necessary). - Terry: Updated for workaround to HID0[DPM] processor bug during global invalidates. *********** Thu, July 13, 2000. - Terry: Added isync to correct for an errata. 22 August 2001. - DanM: Finally added the 7450 patch I've had for the past several months. The L2CR is similar, but I'm going to assume the user of this functions knows what they are doing. Author: Terry Greeniaus (tgree@phys.ualberta.ca) Please e-mail updates to this file to me, thanks! */ #include <asm/processor.h> #include <asm/cputable.h> #include <asm/ppc_asm.h> #include <asm/cache.h> #include <asm/page.h> #include <asm/feature-fixups.h> /* Usage: When setting the L2CR register, you must do a few special things. If you are enabling the cache, you must perform a global invalidate. If you are disabling the cache, you must flush the cache contents first. This routine takes care of doing these things. When first enabling the cache, make sure you pass in the L2CR you want, as well as passing in the global invalidate bit set. A global invalidate will only be performed if the L2I bit is set in applyThis. When enabling the cache, you should also set the L2E bit in applyThis. If you want to modify the L2CR contents after the cache has been enabled, the recommended procedure is to first call __setL2CR(0) to disable the cache and then call it again with the new values for L2CR. Examples: _setL2CR(0) - disables the cache _setL2CR(0xB3A04000) - enables my G3 upgrade card: - L2E set to turn on the cache - L2SIZ set to 1MB - L2CLK set to 1:1 - L2RAM set to pipelined synchronous late-write - L2I set to perform a global invalidation - L2OH set to 0.5 nS - L2DF set because this upgrade card requires it A similar call should work for your card. You need to know the correct setting for your card and then place them in the fields I have outlined above. Other fields support optional features, such as L2DO which caches only data, or L2TS which causes cache pushes from the L1 cache to go to the L2 cache instead of to main memory. IMPORTANT: Starting with the 7450, the bits in this register have moved or behave differently. The Enable, Parity Enable, Size, and L2 Invalidate are the only bits that have not moved. The size is read-only for these processors with internal L2 cache, and the invalidate is a control as well as status. -- Dan */ /* * Summary: this procedure ignores the L2I bit in the value passed in, * flushes the cache if it was already enabled, always invalidates the * cache, then enables the cache if the L2E bit is set in the value * passed in. * -- paulus. */ _GLOBAL(_set_L2CR) /* Make sure this is a 750 or 7400 chip */ BEGIN_FTR_SECTION li r3,-1 blr END_FTR_SECTION_IFCLR(CPU_FTR_L2CR) mflr r9 /* Stop DST streams */ BEGIN_FTR_SECTION PPC_DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* Turn off interrupts and data relocation. 
*/ mfmsr r7 /* Save MSR in r7 */ rlwinm r4,r7,0,17,15 rlwinm r4,r4,0,28,26 /* Turn off DR bit */ sync mtmsr r4 isync /* Before we perform the global invalidation, we must disable dynamic * power management via HID0[DPM] to work around a processor bug where * DPM can possibly interfere with the state machine in the processor * that invalidates the L2 cache tags. */ mfspr r8,SPRN_HID0 /* Save HID0 in r8 */ rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */ sync mtspr SPRN_HID0,r4 /* Disable DPM */ sync /* Get the current enable bit of the L2CR into r4 */ mfspr r4,SPRN_L2CR /* Tweak some bits */ rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */ rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */ rlwinm r3,r3,0,1,31 /* Turn off the enable bit */ /* Check to see if we need to flush */ rlwinm. r4,r4,0,0,0 beq 2f /* Flush the cache. First, read the first 4MB of memory (physical) to * put new data in the cache. (Actually we only need * the size of the L2 cache plus the size of the L1 cache, but 4MB will * cover everything just to be safe). */ /**** Might be a good idea to set L2DO here - to prevent instructions from getting into the cache. But since we invalidate the next time we enable the cache it doesn't really matter. Don't do this unless you accommodate all processor variations. The bit moved on the 7450..... ****/ BEGIN_FTR_SECTION /* Disable L2 prefetch on some 745x and try to ensure * L2 prefetch engines are idle. As explained by errata * text, we can't be sure they are, we just hope very hard * that well be enough (sic !). At least I noticed Apple * doesn't even bother doing the dcbf's here... */ mfspr r4,SPRN_MSSCR0 rlwinm r4,r4,0,0,29 sync mtspr SPRN_MSSCR0,r4 sync isync lis r4,KERNELBASE@h dcbf 0,r4 dcbf 0,r4 dcbf 0,r4 dcbf 0,r4 END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) /* TODO: use HW flush assist when available */ lis r4,0x0002 mtctr r4 li r4,0 1: lwzx r0,0,r4 addi r4,r4,32 /* Go to start of next cache line */ bdnz 1b isync /* Now, flush the first 4MB of memory */ lis r4,0x0002 mtctr r4 li r4,0 sync 1: dcbf 0,r4 addi r4,r4,32 /* Go to start of next cache line */ bdnz 1b 2: /* Set up the L2CR configuration bits (and switch L2 off) */ /* CPU errata: Make sure the mtspr below is already in the * L1 icache */ b 20f .balign L1_CACHE_BYTES 22: sync mtspr SPRN_L2CR,r3 sync b 23f 20: b 21f 21: sync isync b 22b 23: /* Perform a global invalidation */ oris r3,r3,0x0020 sync mtspr SPRN_L2CR,r3 sync isync /* For errata */ BEGIN_FTR_SECTION /* On the 7450, we wait for the L2I bit to clear...... */ 10: mfspr r3,SPRN_L2CR andis. r4,r3,0x0020 bne 10b b 11f END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) /* Wait for the invalidation to complete */ 3: mfspr r3,SPRN_L2CR rlwinm. 
r4,r3,0,31,31 bne 3b 11: rlwinm r3,r3,0,11,9 /* Turn off the L2I bit */ sync mtspr SPRN_L2CR,r3 sync /* See if we need to enable the cache */ cmplwi r5,0 beq 4f /* Enable the cache */ oris r3,r3,0x8000 mtspr SPRN_L2CR,r3 sync /* Enable L2 HW prefetch on 744x/745x */ BEGIN_FTR_SECTION mfspr r3,SPRN_MSSCR0 ori r3,r3,3 sync mtspr SPRN_MSSCR0,r3 sync isync END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) 4: /* Restore HID0[DPM] to whatever it was before */ sync mtspr 1008,r8 sync /* Restore MSR (restores EE and DR bits to original state) */ mtmsr r7 isync mtlr r9 blr _GLOBAL(_get_L2CR) /* Return the L2CR contents */ li r3,0 BEGIN_FTR_SECTION mfspr r3,SPRN_L2CR END_FTR_SECTION_IFSET(CPU_FTR_L2CR) blr /* * Here is a similar routine for dealing with the L3 cache * on the 745x family of chips */ _GLOBAL(_set_L3CR) /* Make sure this is a 745x chip */ BEGIN_FTR_SECTION li r3,-1 blr END_FTR_SECTION_IFCLR(CPU_FTR_L3CR) /* Turn off interrupts and data relocation. */ mfmsr r7 /* Save MSR in r7 */ rlwinm r4,r7,0,17,15 rlwinm r4,r4,0,28,26 /* Turn off DR bit */ sync mtmsr r4 isync /* Stop DST streams */ PPC_DSSALL sync /* Get the current enable bit of the L3CR into r4 */ mfspr r4,SPRN_L3CR /* Tweak some bits */ rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */ rlwinm r3,r3,0,22,20 /* Turn off the invalidate bit */ rlwinm r3,r3,0,2,31 /* Turn off the enable & PE bits */ rlwinm r3,r3,0,5,3 /* Turn off the clken bit */ /* Check to see if we need to flush */ rlwinm. r4,r4,0,0,0 beq 2f /* Flush the cache. */ /* TODO: use HW flush assist */ lis r4,0x0008 mtctr r4 li r4,0 1: lwzx r0,0,r4 dcbf 0,r4 addi r4,r4,32 /* Go to start of next cache line */ bdnz 1b 2: /* Set up the L3CR configuration bits (and switch L3 off) */ sync mtspr SPRN_L3CR,r3 sync oris r3,r3,L3CR_L3RES@h /* Set reserved bit 5 */ mtspr SPRN_L3CR,r3 sync oris r3,r3,L3CR_L3CLKEN@h /* Set clken */ mtspr SPRN_L3CR,r3 sync /* Wait for stabilize */ li r0,256 mtctr r0 1: bdnz 1b /* Perform a global invalidation */ ori r3,r3,0x0400 sync mtspr SPRN_L3CR,r3 sync isync /* We wait for the L3I bit to clear...... */ 10: mfspr r3,SPRN_L3CR andi. r4,r3,0x0400 bne 10b /* Clear CLKEN */ rlwinm r3,r3,0,5,3 /* Turn off the clken bit */ mtspr SPRN_L3CR,r3 sync /* Wait for stabilize */ li r0,256 mtctr r0 1: bdnz 1b /* See if we need to enable the cache */ cmplwi r5,0 beq 4f /* Enable the cache */ oris r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h mtspr SPRN_L3CR,r3 sync /* Wait for stabilize */ li r0,256 mtctr r0 1: bdnz 1b /* Restore MSR (restores EE and DR bits to original state) */ 4: mtmsr r7 isync blr _GLOBAL(_get_L3CR) /* Return the L3CR contents */ li r3,0 BEGIN_FTR_SECTION mfspr r3,SPRN_L3CR END_FTR_SECTION_IFSET(CPU_FTR_L3CR) blr /* --- End of PowerLogix code --- */ /* flush_disable_L1() - Flush and disable L1 cache * * clobbers r0, r3, ctr, cr0 * Must be called with interrupts disabled and MMU enabled. 
*/ _GLOBAL(__flush_disable_L1) /* Stop pending altivec streams and memory accesses */ BEGIN_FTR_SECTION PPC_DSSALL END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) sync /* Load counter to 0x4000 cache lines (512k) and * load the cache with data */ li r3,0x4000 /* 512kB / 32B */ mtctr r3 lis r3,KERNELBASE@h 1: lwz r0,0(r3) addi r3,r3,0x0020 /* Go to start of next cache line */ bdnz 1b isync sync /* Now flush those cache lines */ li r3,0x4000 /* 512kB / 32B */ mtctr r3 lis r3,KERNELBASE@h 1: dcbf 0,r3 addi r3,r3,0x0020 /* Go to start of next cache line */ bdnz 1b sync /* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */ mfspr r3,SPRN_HID0 rlwinm r3,r3,0,18,15 mtspr SPRN_HID0,r3 sync isync blr /* inval_enable_L1 - Invalidate and enable L1 cache * * Assumes L1 is already disabled and MSR:EE is off * * clobbers r3 */ _GLOBAL(__inval_enable_L1) /* Enable and then Flash inval the instruction & data cache */ mfspr r3,SPRN_HID0 ori r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI sync isync mtspr SPRN_HID0,r3 xori r3,r3, HID0_ICFI|HID0_DCI mtspr SPRN_HID0,r3 sync blr _ASM_NOKPROBE_SYMBOL(__inval_enable_L1)
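The flush in _set_L2CR is a displacement flush: read enough cacheable memory to evict every dirty line, then dcbf the same range. A powerpc-only C rendering of the idea; base address and sizes are parameters here, where the asm hardcodes KERNELBASE/physical 0 and 4 MB:

static void displacement_flush(const volatile char *base, unsigned long bytes,
                               unsigned long line_size)
{
        unsigned long off;

        for (off = 0; off < bytes; off += line_size)
                (void)base[off];        /* load: pull lines in, displacing dirty data */
        __asm__ volatile("sync");
        for (off = 0; off < bytes; off += line_size)
                __asm__ volatile("dcbf 0,%0" : : "r"(&base[off]) : "memory");
        __asm__ volatile("sync");
}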
aixcc-public/challenge-001-exemplar-source
3,975
arch/powerpc/kernel/cpu_setup_ppc970.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains low level CPU setup functions. * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) */ #include <asm/processor.h> #include <asm/page.h> #include <asm/cputable.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/cache.h> _GLOBAL(__cpu_preinit_ppc970) /* Do nothing if not running in HV mode */ mfmsr r0 rldicl. r0,r0,4,63 beqlr /* Make sure HID4:rm_ci is off before MMU is turned off, that large * pages are enabled with HID4:61 and clear HID5:DCBZ_size and * HID5:DCBZ32_ill */ li r0,0 mfspr r3,SPRN_HID4 rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */ rldimi r3,r0,2,61 /* clear bit 61 (lg_pg_en) */ sync mtspr SPRN_HID4,r3 isync sync mfspr r3,SPRN_HID5 rldimi r3,r0,6,56 /* clear bits 56 & 57 (DCBZ*) */ sync mtspr SPRN_HID5,r3 isync sync /* Setup some basic HID1 features */ mfspr r0,SPRN_HID1 li r3,0x1200 /* enable i-fetch cacheability */ sldi r3,r3,44 /* and prefetch */ or r0,r0,r3 mtspr SPRN_HID1,r0 mtspr SPRN_HID1,r0 isync /* Clear HIOR */ li r0,0 sync mtspr SPRN_HIOR,0 /* Clear interrupt prefix */ isync blr /* Definitions for the table use to save CPU states */ #define CS_HID0 0 #define CS_HID1 8 #define CS_HID4 16 #define CS_HID5 24 #define CS_SIZE 32 .data .balign L1_CACHE_BYTES,0 cpu_state_storage: .space CS_SIZE .balign L1_CACHE_BYTES,0 .text _GLOBAL(__setup_cpu_ppc970) /* Do nothing if not running in HV mode */ mfmsr r0 rldicl. r0,r0,4,63 beq no_hv_mode mfspr r0,SPRN_HID0 li r11,5 /* clear DOZE and SLEEP */ rldimi r0,r11,52,8 /* set NAP and DPM */ li r11,0 rldimi r0,r11,32,31 /* clear EN_ATTN */ b load_hids /* Jump to shared code */ _GLOBAL(__setup_cpu_ppc970MP) /* Do nothing if not running in HV mode */ mfmsr r0 rldicl. r0,r0,4,63 beq no_hv_mode mfspr r0,SPRN_HID0 li r11,0x15 /* clear DOZE and SLEEP */ rldimi r0,r11,52,6 /* set DEEPNAP, NAP and DPM */ li r11,0 rldimi r0,r11,32,31 /* clear EN_ATTN */ load_hids: mtspr SPRN_HID0,r0 mfspr r0,SPRN_HID0 mfspr r0,SPRN_HID0 mfspr r0,SPRN_HID0 mfspr r0,SPRN_HID0 mfspr r0,SPRN_HID0 mfspr r0,SPRN_HID0 sync isync /* Try to set LPES = 01 in HID4 */ mfspr r0,SPRN_HID4 clrldi r0,r0,1 /* clear LPES0 */ ori r0,r0,HID4_LPES1 /* set LPES1 */ sync mtspr SPRN_HID4,r0 isync /* Save away cpu state */ LOAD_REG_ADDR(r5,cpu_state_storage) /* Save HID0,1,4 and 5 */ mfspr r3,SPRN_HID0 std r3,CS_HID0(r5) mfspr r3,SPRN_HID1 std r3,CS_HID1(r5) mfspr r4,SPRN_HID4 std r4,CS_HID4(r5) mfspr r3,SPRN_HID5 std r3,CS_HID5(r5) /* See if we successfully set LPES1 to 1; if not we are in Apple mode */ andi. r4,r4,HID4_LPES1 bnelr no_hv_mode: /* Disable CPU_FTR_HVMODE and exit, since we don't have HV mode */ ld r5,CPU_SPEC_FEATURES(r4) LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE) andc r5,r5,r6 std r5,CPU_SPEC_FEATURES(r4) blr /* Called with no MMU context (typically MSR:IR/DR off) to * restore CPU state as backed up by the previous * function. This does not include cache setting */ _GLOBAL(__restore_cpu_ppc970) /* Do nothing if not running in HV mode */ mfmsr r0 rldicl. 
r0,r0,4,63 beqlr LOAD_REG_ADDR(r5,cpu_state_storage) /* Before accessing memory, we make sure rm_ci is clear */ li r0,0 mfspr r3,SPRN_HID4 rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */ sync mtspr SPRN_HID4,r3 isync sync /* Clear interrupt prefix */ li r0,0 sync mtspr SPRN_HIOR,0 isync /* Restore HID0 */ ld r3,CS_HID0(r5) sync isync mtspr SPRN_HID0,r3 mfspr r3,SPRN_HID0 mfspr r3,SPRN_HID0 mfspr r3,SPRN_HID0 mfspr r3,SPRN_HID0 mfspr r3,SPRN_HID0 mfspr r3,SPRN_HID0 sync isync /* Restore HID1 */ ld r3,CS_HID1(r5) sync isync mtspr SPRN_HID1,r3 mtspr SPRN_HID1,r3 sync isync /* Restore HID4 */ ld r3,CS_HID4(r5) sync isync mtspr SPRN_HID4,r3 sync isync /* Restore HID5 */ ld r3,CS_HID5(r5) sync isync mtspr SPRN_HID5,r3 sync isync blr
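The CS_* offsets above lay out the 32-byte cpu_state_storage slab that __setup_cpu_ppc970* fills and __restore_cpu_ppc970 replays. In C terms it is simply this struct (a sketch, not a definition the kernel uses):

#include <stdint.h>

struct ppc970_cpu_state {
        uint64_t hid0;  /* CS_HID0 =  0 */
        uint64_t hid1;  /* CS_HID1 =  8 */
        uint64_t hid4;  /* CS_HID4 = 16 */
        uint64_t hid5;  /* CS_HID5 = 24 */
};

_Static_assert(sizeof(struct ppc970_cpu_state) == 32, "must match CS_SIZE");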
aixcc-public/challenge-001-exemplar-source
7,079
arch/powerpc/kernel/vector.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/reg.h> #include <asm/asm-offsets.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/export.h> #include <asm/asm-compat.h> /* * Load state from memory into VMX registers including VSCR. * Assumes the caller has enabled VMX in the MSR. */ _GLOBAL(load_vr_state) li r4,VRSTATE_VSCR lvx v0,r4,r3 mtvscr v0 REST_32VRS(0,r4,r3) blr EXPORT_SYMBOL(load_vr_state) _ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */ /* * Store VMX state into memory, including VSCR. * Assumes the caller has enabled VMX in the MSR. */ _GLOBAL(store_vr_state) SAVE_32VRS(0, r4, r3) mfvscr v0 li r4, VRSTATE_VSCR stvx v0, r4, r3 blr EXPORT_SYMBOL(store_vr_state) /* * Disable VMX for the task which had it previously, * and save its vector registers in its thread_struct. * Enables the VMX for use in the kernel on return. * On SMP we know the VMX is free, since we give it up every * switch (ie, no lazy save of the vector registers). * * Note that on 32-bit this can only use registers that will be * restored by fast_exception_return, i.e. r3 - r6, r10 and r11. */ _GLOBAL(load_up_altivec) mfmsr r5 /* grab the current MSR */ #ifdef CONFIG_PPC_BOOK3S_64 /* interrupt doesn't set MSR[RI] and HPT can fault on current access */ ori r5,r5,MSR_RI #endif oris r5,r5,MSR_VEC@h MTMSRD(r5) /* enable use of AltiVec now */ isync /* * While userspace in general ignores VRSAVE, glibc uses it as a boolean * to optimise userspace context save/restore. Whenever we take an * altivec unavailable exception we must set VRSAVE to something non * zero. Set it to all 1s. See also the programming note in the ISA. */ mfspr r4,SPRN_VRSAVE cmpwi 0,r4,0 bne+ 1f li r4,-1 mtspr SPRN_VRSAVE,r4 1: /* enable use of VMX after return */ #ifdef CONFIG_PPC32 addi r5,r2,THREAD oris r9,r9,MSR_VEC@h #else ld r4,PACACURRENT(r13) addi r5,r4,THREAD /* Get THREAD */ oris r12,r12,MSR_VEC@h std r12,_MSR(r1) #ifdef CONFIG_PPC_BOOK3S_64 li r4,0 stb r4,PACASRR_VALID(r13) #endif #endif li r4,1 stb r4,THREAD_LOAD_VEC(r5) addi r6,r5,THREAD_VRSTATE li r10,VRSTATE_VSCR stw r4,THREAD_USED_VR(r5) lvx v0,r10,r6 mtvscr v0 REST_32VRS(0,r4,r6) /* restore registers and return */ blr _ASM_NOKPROBE_SYMBOL(load_up_altivec) /* * save_altivec(tsk) * Save the vector registers to its thread_struct */ _GLOBAL(save_altivec) addi r3,r3,THREAD /* want THREAD of task */ PPC_LL r7,THREAD_VRSAVEAREA(r3) PPC_LL r5,PT_REGS(r3) PPC_LCMPI 0,r7,0 bne 2f addi r7,r3,THREAD_VRSTATE 2: SAVE_32VRS(0,r4,r7) mfvscr v0 li r4,VRSTATE_VSCR stvx v0,r4,r7 blr #ifdef CONFIG_VSX #ifdef CONFIG_PPC32 #error This asm code isn't ready for 32-bit kernels #endif /* * load_up_vsx(unused, unused, tsk) * Disable VSX for the task which had it previously, * and save its vector registers in its thread_struct. * Reuse the fp and vsx saves, but first check to see if they have * been saved already. */ _GLOBAL(load_up_vsx) /* Load FP and VSX registers if they haven't been done yet */ andi. r5,r12,MSR_FP beql+ load_up_fpu /* skip if already loaded */ andis. r5,r12,MSR_VEC@h beql+ load_up_altivec /* skip if already loaded */ #ifdef CONFIG_PPC_BOOK3S_64 /* interrupt doesn't set MSR[RI] and HPT can fault on current access */ li r5,MSR_RI mtmsrd r5,1 #endif ld r4,PACACURRENT(r13) addi r4,r4,THREAD /* Get THREAD */ li r6,1 stw r6,THREAD_USED_VSR(r4) /* ... 
also set thread used vsr */ /* enable use of VSX after return */ oris r12,r12,MSR_VSX@h std r12,_MSR(r1) li r4,0 stb r4,PACASRR_VALID(r13) b fast_interrupt_return_srr #endif /* CONFIG_VSX */ /* * The routines below are in assembler so we can closely control the * usage of floating-point registers. These routines must be called * with preempt disabled. */ .data #ifdef CONFIG_PPC32 fpzero: .long 0 fpone: .long 0x3f800000 /* 1.0 in single-precision FP */ fphalf: .long 0x3f000000 /* 0.5 in single-precision FP */ #define LDCONST(fr, name) \ lis r11,name@ha; \ lfs fr,name@l(r11) #else fpzero: .quad 0 fpone: .quad 0x3ff0000000000000 /* 1.0 */ fphalf: .quad 0x3fe0000000000000 /* 0.5 */ #define LDCONST(fr, name) \ addis r11,r2,name@toc@ha; \ lfd fr,name@toc@l(r11) #endif .text /* * Internal routine to enable floating point and set FPSCR to 0. * Don't call it from C; it doesn't use the normal calling convention. */ fpenable: #ifdef CONFIG_PPC32 stwu r1,-64(r1) #else stdu r1,-64(r1) #endif mfmsr r10 ori r11,r10,MSR_FP mtmsr r11 isync stfd fr0,24(r1) stfd fr1,16(r1) stfd fr31,8(r1) LDCONST(fr1, fpzero) mffs fr31 MTFSF_L(fr1) blr fpdisable: mtlr r12 MTFSF_L(fr31) lfd fr31,8(r1) lfd fr1,16(r1) lfd fr0,24(r1) mtmsr r10 isync addi r1,r1,64 blr /* * Vector add, floating point. */ _GLOBAL(vaddfp) mflr r12 bl fpenable li r0,4 mtctr r0 li r6,0 1: lfsx fr0,r4,r6 lfsx fr1,r5,r6 fadds fr0,fr0,fr1 stfsx fr0,r3,r6 addi r6,r6,4 bdnz 1b b fpdisable /* * Vector subtract, floating point. */ _GLOBAL(vsubfp) mflr r12 bl fpenable li r0,4 mtctr r0 li r6,0 1: lfsx fr0,r4,r6 lfsx fr1,r5,r6 fsubs fr0,fr0,fr1 stfsx fr0,r3,r6 addi r6,r6,4 bdnz 1b b fpdisable /* * Vector multiply and add, floating point. */ _GLOBAL(vmaddfp) mflr r12 bl fpenable stfd fr2,32(r1) li r0,4 mtctr r0 li r7,0 1: lfsx fr0,r4,r7 lfsx fr1,r5,r7 lfsx fr2,r6,r7 fmadds fr0,fr0,fr2,fr1 stfsx fr0,r3,r7 addi r7,r7,4 bdnz 1b lfd fr2,32(r1) b fpdisable /* * Vector negative multiply and subtract, floating point. */ _GLOBAL(vnmsubfp) mflr r12 bl fpenable stfd fr2,32(r1) li r0,4 mtctr r0 li r7,0 1: lfsx fr0,r4,r7 lfsx fr1,r5,r7 lfsx fr2,r6,r7 fnmsubs fr0,fr0,fr2,fr1 stfsx fr0,r3,r7 addi r7,r7,4 bdnz 1b lfd fr2,32(r1) b fpdisable /* * Vector reciprocal estimate. We just compute 1.0/x. * r3 -> destination, r4 -> source. */ _GLOBAL(vrefp) mflr r12 bl fpenable li r0,4 LDCONST(fr1, fpone) mtctr r0 li r6,0 1: lfsx fr0,r4,r6 fdivs fr0,fr1,fr0 stfsx fr0,r3,r6 addi r6,r6,4 bdnz 1b b fpdisable /* * Vector reciprocal square-root estimate, floating point. * We use the frsqrte instruction for the initial estimate followed * by 2 iterations of Newton-Raphson to get sufficient accuracy. * r3 -> destination, r4 -> source. */ _GLOBAL(vrsqrtefp) mflr r12 bl fpenable stfd fr2,32(r1) stfd fr3,40(r1) stfd fr4,48(r1) stfd fr5,56(r1) li r0,4 LDCONST(fr4, fpone) LDCONST(fr5, fphalf) mtctr r0 li r6,0 1: lfsx fr0,r4,r6 frsqrte fr1,fr0 /* r = frsqrte(s) */ fmuls fr3,fr1,fr0 /* r * s */ fmuls fr2,fr1,fr5 /* r * 0.5 */ fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ fmuls fr3,fr1,fr0 /* r * s */ fmuls fr2,fr1,fr5 /* r * 0.5 */ fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ stfsx fr1,r3,r6 addi r6,r6,4 bdnz 1b lfd fr5,56(r1) lfd fr4,48(r1) lfd fr3,40(r1) lfd fr2,32(r1) b fpdisable
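The refinement in vrsqrtefp is plain Newton-Raphson for 1/sqrt(s): r' = r + 0.5*r*(1 - s*r*r), applied twice to the frsqrte estimate. A scalar C sketch; the bit-trick estimate below is only a software stand-in for the hardware frsqrte instruction:

#include <stdint.h>
#include <string.h>

static float rsqrt_estimate(float s)
{
        uint32_t i;

        memcpy(&i, &s, sizeof(i));
        i = 0x5f3759df - (i >> 1);      /* rough 1/sqrt guess, stand-in for frsqrte */
        memcpy(&s, &i, sizeof(s));
        return s;
}

static float rsqrt(float s)
{
        float r = rsqrt_estimate(s);

        /* two Newton-Raphson steps, as in the fmuls/fnmsubs/fmadds chain */
        r = r + 0.5f * r * (1.0f - s * r * r);
        r = r + 0.5f * r * (1.0f - s * r * r);
        return r;
}

Each step roughly doubles the number of correct bits, which is why two iterations on a coarse estimate suffice for single precision.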
aixcc-public/challenge-001-exemplar-source
1,152
arch/powerpc/kernel/epapr_hcalls.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2012 Freescale Semiconductor, Inc. */ #include <linux/threads.h> #include <asm/epapr_hcalls.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-compat.h> #include <asm/asm-offsets.h> #include <asm/export.h> #ifndef CONFIG_PPC64 /* epapr_ev_idle() was derived from e500_idle() */ _GLOBAL(epapr_ev_idle) PPC_LL r4, TI_LOCAL_FLAGS(r2) /* set napping bit */ ori r4, r4,_TLF_NAPPING /* so when we take an exception */ PPC_STL r4, TI_LOCAL_FLAGS(r2) /* it will return to our caller */ wrteei 1 idle_loop: LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE)) .global epapr_ev_idle_start epapr_ev_idle_start: li r3, -1 nop nop nop /* * Guard against spurious wakeups from a hypervisor -- * only an interrupt will cause us to return to LR due to * _TLF_NAPPING. */ b idle_loop #endif /* Hypercall entry point. Will be patched with device tree instructions. */ .global epapr_hypercall_start epapr_hypercall_start: li r3, -1 nop nop nop blr EXPORT_SYMBOL(epapr_hypercall_start)
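The li/nop/nop/nop bodies above are placeholders: at boot the kernel copies in the instruction sequence the hypervisor advertises in the device tree (the ePAPR "hcall-instructions" property), so the stub becomes a real hypercall trampoline. Schematically, with a hypothetical helper name and the required icache flush elided:

#include <stdint.h>

static void patch_hypercall_stub(uint32_t *stub, const uint32_t *insns, int count)
{
        for (int i = 0; i < count; i++)
                stub[i] = insns[i];     /* real code must also flush the icache */
}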
aixcc-public/challenge-001-exemplar-source
8,273
arch/powerpc/kernel/swsusp_32.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/threads.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/mmu.h> #include <asm/feature-fixups.h> /* * Structure for storing CPU registers on the save area. */ #define SL_SP 0 #define SL_PC 4 #define SL_MSR 8 #define SL_SDR1 0xc #define SL_SPRG0 0x10 /* 4 sprg's */ #define SL_DBAT0 0x20 #define SL_IBAT0 0x28 #define SL_DBAT1 0x30 #define SL_IBAT1 0x38 #define SL_DBAT2 0x40 #define SL_IBAT2 0x48 #define SL_DBAT3 0x50 #define SL_IBAT3 0x58 #define SL_DBAT4 0x60 #define SL_IBAT4 0x68 #define SL_DBAT5 0x70 #define SL_IBAT5 0x78 #define SL_DBAT6 0x80 #define SL_IBAT6 0x88 #define SL_DBAT7 0x90 #define SL_IBAT7 0x98 #define SL_TB 0xa0 #define SL_R2 0xa8 #define SL_CR 0xac #define SL_LR 0xb0 #define SL_R12 0xb4 /* r12 to r31 */ #define SL_SIZE (SL_R12 + 80) .section .data .align 5 _GLOBAL(swsusp_save_area) .space SL_SIZE .section .text .align 5 _GLOBAL(swsusp_arch_suspend) lis r11,swsusp_save_area@h ori r11,r11,swsusp_save_area@l mflr r0 stw r0,SL_LR(r11) mfcr r0 stw r0,SL_CR(r11) stw r1,SL_SP(r11) stw r2,SL_R2(r11) stmw r12,SL_R12(r11) /* Save MSR & SDR1 */ mfmsr r4 stw r4,SL_MSR(r11) mfsdr1 r4 stw r4,SL_SDR1(r11) /* Get a stable timebase and save it */ 1: mftbu r4 stw r4,SL_TB(r11) mftb r5 stw r5,SL_TB+4(r11) mftbu r3 cmpw r3,r4 bne 1b /* Save SPRGs */ mfsprg r4,0 stw r4,SL_SPRG0(r11) mfsprg r4,1 stw r4,SL_SPRG0+4(r11) mfsprg r4,2 stw r4,SL_SPRG0+8(r11) mfsprg r4,3 stw r4,SL_SPRG0+12(r11) /* Save BATs */ mfdbatu r4,0 stw r4,SL_DBAT0(r11) mfdbatl r4,0 stw r4,SL_DBAT0+4(r11) mfdbatu r4,1 stw r4,SL_DBAT1(r11) mfdbatl r4,1 stw r4,SL_DBAT1+4(r11) mfdbatu r4,2 stw r4,SL_DBAT2(r11) mfdbatl r4,2 stw r4,SL_DBAT2+4(r11) mfdbatu r4,3 stw r4,SL_DBAT3(r11) mfdbatl r4,3 stw r4,SL_DBAT3+4(r11) mfibatu r4,0 stw r4,SL_IBAT0(r11) mfibatl r4,0 stw r4,SL_IBAT0+4(r11) mfibatu r4,1 stw r4,SL_IBAT1(r11) mfibatl r4,1 stw r4,SL_IBAT1+4(r11) mfibatu r4,2 stw r4,SL_IBAT2(r11) mfibatl r4,2 stw r4,SL_IBAT2+4(r11) mfibatu r4,3 stw r4,SL_IBAT3(r11) mfibatl r4,3 stw r4,SL_IBAT3+4(r11) BEGIN_MMU_FTR_SECTION mfspr r4,SPRN_DBAT4U stw r4,SL_DBAT4(r11) mfspr r4,SPRN_DBAT4L stw r4,SL_DBAT4+4(r11) mfspr r4,SPRN_DBAT5U stw r4,SL_DBAT5(r11) mfspr r4,SPRN_DBAT5L stw r4,SL_DBAT5+4(r11) mfspr r4,SPRN_DBAT6U stw r4,SL_DBAT6(r11) mfspr r4,SPRN_DBAT6L stw r4,SL_DBAT6+4(r11) mfspr r4,SPRN_DBAT7U stw r4,SL_DBAT7(r11) mfspr r4,SPRN_DBAT7L stw r4,SL_DBAT7+4(r11) mfspr r4,SPRN_IBAT4U stw r4,SL_IBAT4(r11) mfspr r4,SPRN_IBAT4L stw r4,SL_IBAT4+4(r11) mfspr r4,SPRN_IBAT5U stw r4,SL_IBAT5(r11) mfspr r4,SPRN_IBAT5L stw r4,SL_IBAT5+4(r11) mfspr r4,SPRN_IBAT6U stw r4,SL_IBAT6(r11) mfspr r4,SPRN_IBAT6L stw r4,SL_IBAT6+4(r11) mfspr r4,SPRN_IBAT7U stw r4,SL_IBAT7(r11) mfspr r4,SPRN_IBAT7L stw r4,SL_IBAT7+4(r11) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) #if 0 /* Backup various CPU config stuffs */ bl __save_cpu_setup #endif /* Call the low level suspend stuff (we should probably have made * a stackframe... */ bl swsusp_save /* Restore LR from the save area */ lis r11,swsusp_save_area@h ori r11,r11,swsusp_save_area@l lwz r0,SL_LR(r11) mtlr r0 blr /* Resume code */ _GLOBAL(swsusp_arch_resume) #ifdef CONFIG_ALTIVEC /* Stop pending alitvec streams and memory accesses */ BEGIN_FTR_SECTION PPC_DSSALL END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif sync /* Disable MSR:DR to make sure we don't take a TLB or * hash miss during the copy, as our hash table will * for a while be unusable. 
For .text, we assume we are * covered by a BAT. This works only for non-G5 at this * point. G5 will need a better approach, possibly using * a small temporary hash table filled with large mappings, * disabling the MMU completely isn't a good option for * performance reasons. * (Note that 750's may have the same performance issue as * the G5 in this case, we should investigate using moving * BATs for these CPUs) */ mfmsr r0 sync rlwinm r0,r0,0,28,26 /* clear MSR_DR */ mtmsr r0 sync isync /* Load ptr the list of pages to copy in r3 */ lis r11,(restore_pblist - KERNELBASE)@h ori r11,r11,restore_pblist@l lwz r10,0(r11) /* Copy the pages. This is a very basic implementation, to * be replaced by something more cache efficient */ 1: tophys(r3,r10) li r0,256 mtctr r0 lwz r11,pbe_address(r3) /* source */ tophys(r5,r11) lwz r10,pbe_orig_address(r3) /* destination */ tophys(r6,r10) 2: lwz r8,0(r5) lwz r9,4(r5) lwz r10,8(r5) lwz r11,12(r5) addi r5,r5,16 stw r8,0(r6) stw r9,4(r6) stw r10,8(r6) stw r11,12(r6) addi r6,r6,16 bdnz 2b lwz r10,pbe_next(r3) cmpwi 0,r10,0 bne 1b /* Do a very simple cache flush/inval of the L1 to ensure * coherency of the icache */ lis r3,0x0002 mtctr r3 li r3, 0 1: lwz r0,0(r3) addi r3,r3,0x0020 bdnz 1b isync sync /* Now flush those cache lines */ lis r3,0x0002 mtctr r3 li r3, 0 1: dcbf 0,r3 addi r3,r3,0x0020 bdnz 1b sync /* Ok, we are now running with the kernel data of the old * kernel fully restored. We can get to the save area * easily now. As for the rest of the code, it assumes the * loader kernel and the booted one are exactly identical */ lis r11,swsusp_save_area@h ori r11,r11,swsusp_save_area@l tophys(r11,r11) #if 0 /* Restore various CPU config stuffs */ bl __restore_cpu_setup #endif /* Restore the BATs, and SDR1. Then we can turn on the MMU. 
* This is a bit hairy as we are running out of those BATs, * but first, our code is probably in the icache, and we are * writing the same value to the BAT, so that should be fine, * though a better solution will have to be found long-term */ lwz r4,SL_SDR1(r11) mtsdr1 r4 lwz r4,SL_SPRG0(r11) mtsprg 0,r4 lwz r4,SL_SPRG0+4(r11) mtsprg 1,r4 lwz r4,SL_SPRG0+8(r11) mtsprg 2,r4 lwz r4,SL_SPRG0+12(r11) mtsprg 3,r4 #if 0 lwz r4,SL_DBAT0(r11) mtdbatu 0,r4 lwz r4,SL_DBAT0+4(r11) mtdbatl 0,r4 lwz r4,SL_DBAT1(r11) mtdbatu 1,r4 lwz r4,SL_DBAT1+4(r11) mtdbatl 1,r4 lwz r4,SL_DBAT2(r11) mtdbatu 2,r4 lwz r4,SL_DBAT2+4(r11) mtdbatl 2,r4 lwz r4,SL_DBAT3(r11) mtdbatu 3,r4 lwz r4,SL_DBAT3+4(r11) mtdbatl 3,r4 lwz r4,SL_IBAT0(r11) mtibatu 0,r4 lwz r4,SL_IBAT0+4(r11) mtibatl 0,r4 lwz r4,SL_IBAT1(r11) mtibatu 1,r4 lwz r4,SL_IBAT1+4(r11) mtibatl 1,r4 lwz r4,SL_IBAT2(r11) mtibatu 2,r4 lwz r4,SL_IBAT2+4(r11) mtibatl 2,r4 lwz r4,SL_IBAT3(r11) mtibatu 3,r4 lwz r4,SL_IBAT3+4(r11) mtibatl 3,r4 BEGIN_MMU_FTR_SECTION lwz r4,SL_DBAT4(r11) mtspr SPRN_DBAT4U,r4 lwz r4,SL_DBAT4+4(r11) mtspr SPRN_DBAT4L,r4 lwz r4,SL_DBAT5(r11) mtspr SPRN_DBAT5U,r4 lwz r4,SL_DBAT5+4(r11) mtspr SPRN_DBAT5L,r4 lwz r4,SL_DBAT6(r11) mtspr SPRN_DBAT6U,r4 lwz r4,SL_DBAT6+4(r11) mtspr SPRN_DBAT6L,r4 lwz r4,SL_DBAT7(r11) mtspr SPRN_DBAT7U,r4 lwz r4,SL_DBAT7+4(r11) mtspr SPRN_DBAT7L,r4 lwz r4,SL_IBAT4(r11) mtspr SPRN_IBAT4U,r4 lwz r4,SL_IBAT4+4(r11) mtspr SPRN_IBAT4L,r4 lwz r4,SL_IBAT5(r11) mtspr SPRN_IBAT5U,r4 lwz r4,SL_IBAT5+4(r11) mtspr SPRN_IBAT5L,r4 lwz r4,SL_IBAT6(r11) mtspr SPRN_IBAT6U,r4 lwz r4,SL_IBAT6+4(r11) mtspr SPRN_IBAT6L,r4 lwz r4,SL_IBAT7(r11) mtspr SPRN_IBAT7U,r4 lwz r4,SL_IBAT7+4(r11) mtspr SPRN_IBAT7L,r4 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) #endif /* Flush all TLBs */ lis r4,0x1000 1: addic. r4,r4,-0x1000 tlbie r4 bgt 1b sync /* restore the MSR and turn on the MMU */ lwz r3,SL_MSR(r11) bl turn_on_mmu tovirt(r11,r11) /* Restore TB */ li r3,0 mttbl r3 lwz r3,SL_TB(r11) lwz r4,SL_TB+4(r11) mttbu r3 mttbl r4 /* Kick decrementer */ li r0,1 mtdec r0 /* Restore the callee-saved registers and return */ lwz r0,SL_CR(r11) mtcr r0 lwz r2,SL_R2(r11) lmw r12,SL_R12(r11) lwz r1,SL_SP(r11) lwz r0,SL_LR(r11) mtlr r0 // XXX Note: we don't really need to call swsusp_resume li r3,0 blr _ASM_NOKPROBE_SYMBOL(swsusp_arch_resume) /* FIXME:This construct is actually not useful since we don't shut * down the instruction MMU, we could just flip back MSR-DR on. */ turn_on_mmu: mflr r4 mtsrr0 r4 mtsrr1 r3 sync isync rfi _ASM_NOKPROBE_SYMBOL(turn_on_mmu)
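The resume copy loop above walks restore_pblist and copies each saved page back to its original address, 16 bytes per iteration, 256 iterations per 4 KiB page. The same walk in C, with a sketched struct matching the pbe_address/pbe_orig_address/pbe_next offsets the asm loads:

#include <string.h>

struct pbe {
        void       *address;        /* pbe_address: where the page was saved */
        void       *orig_address;   /* pbe_orig_address: where it belongs */
        struct pbe *next;           /* pbe_next */
};

static void restore_pages(struct pbe *list, unsigned long page_size)
{
        for (struct pbe *p = list; p; p = p->next)
                memcpy(p->orig_address, p->address, page_size);
}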
aixcc-public/challenge-001-exemplar-source
31,467
arch/powerpc/kernel/head_85xx.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Kernel execution entry point code. * * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> * Initial PowerPC version. * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> * Rewritten for PReP * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> * Low-level exception handers, MMU support, and rewrite. * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> * PowerPC 8xx modifications. * Copyright (c) 1998-1999 TiVo, Inc. * PowerPC 403GCX modifications. * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> * PowerPC 403GCX/405GP modifications. * Copyright 2000 MontaVista Software Inc. * PPC405 modifications * PowerPC 403GCX/405GP modifications. * Author: MontaVista Software, Inc. * frank_rowand@mvista.com or source@mvista.com * debbie_chu@mvista.com * Copyright 2002-2004 MontaVista Software, Inc. * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> * Copyright 2004 Freescale Semiconductor, Inc * PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org> */ #include <linux/init.h> #include <linux/threads.h> #include <linux/pgtable.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/cache.h> #include <asm/ptrace.h> #include <asm/export.h> #include <asm/feature-fixups.h> #include "head_booke.h" /* As with the other PowerPC ports, it is expected that when code * execution begins here, the following registers contain valid, yet * optional, information: * * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) * r4 - Starting address of the init RAM disk * r5 - Ending address of the init RAM disk * r6 - Start of kernel command line string (e.g. "mem=128") * r7 - End of kernel command line string * */ __HEAD _GLOBAL(_stext); _GLOBAL(_start); /* * Reserve a word at a fixed location to store the address * of abatron_pteptrs */ nop /* Translate device tree address to physical, save in r30/r31 */ bl get_phys_addr mr r30,r3 mr r31,r4 li r25,0 /* phys kernel start (low) */ li r24,0 /* CPU number */ li r23,0 /* phys kernel start (high) */ #ifdef CONFIG_RELOCATABLE LOAD_REG_ADDR_PIC(r3, _stext) /* Get our current runtime base */ /* Translate _stext address to physical, save in r23/r25 */ bl get_phys_addr mr r23,r3 mr r25,r4 bcl 20,31,$+4 0: mflr r8 addis r3,r8,(is_second_reloc - 0b)@ha lwz r19,(is_second_reloc - 0b)@l(r3) /* Check if this is the second relocation. */ cmpwi r19,1 bne 1f /* * For the second relocation, we already get the real memstart_addr * from device tree. So we will map PAGE_OFFSET to memstart_addr, * then the virtual address of start kernel should be: * PAGE_OFFSET + (kernstart_addr - memstart_addr) * Since the offset between kernstart_addr and memstart_addr should * never be beyond 1G, so we can just use the lower 32bit of them * for the calculation. */ lis r3,PAGE_OFFSET@h addis r4,r8,(kernstart_addr - 0b)@ha addi r4,r4,(kernstart_addr - 0b)@l lwz r5,4(r4) addis r6,r8,(memstart_addr - 0b)@ha addi r6,r6,(memstart_addr - 0b)@l lwz r7,4(r6) subf r5,r7,r5 add r3,r3,r5 b 2f 1: /* * We have the runtime (virtual) address of our base. * We calculate our shift of offset from a 64M page. * We could map the 64M page we belong to at PAGE_OFFSET and * get going from there. 
*/ lis r4,KERNELBASE@h ori r4,r4,KERNELBASE@l rlwinm r6,r25,0,0x3ffffff /* r6 = PHYS_START % 64M */ rlwinm r5,r4,0,0x3ffffff /* r5 = KERNELBASE % 64M */ subf r3,r5,r6 /* r3 = r6 - r5 */ add r3,r4,r3 /* Required Virtual Address */ 2: bl relocate /* * For the second relocation, we already set the right tlb entries * for the kernel space, so skip the code in 85xx_entry_mapping.S */ cmpwi r19,1 beq set_ivor #endif /* We try to not make any assumptions about how the boot loader * setup or used the TLBs. We invalidate all mappings from the * boot loader and load a single entry in TLB1[0] to map the * first 64M of kernel memory. Any boot info passed from the * bootloader needs to live in this first 64M. * * Requirement on bootloader: * - The page we're executing in needs to reside in TLB1 and * have IPROT=1. If not an invalidate broadcast could * evict the entry we're currently executing in. * * r3 = Index of TLB1 were executing in * r4 = Current MSR[IS] * r5 = Index of TLB1 temp mapping * * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0] * if needed */ _GLOBAL(__early_start) LOAD_REG_ADDR_PIC(r20, kernstart_virt_addr) lwz r20,0(r20) #define ENTRY_MAPPING_BOOT_SETUP #include "85xx_entry_mapping.S" #undef ENTRY_MAPPING_BOOT_SETUP set_ivor: /* Establish the interrupt vector offsets */ SET_IVOR(0, CriticalInput); SET_IVOR(1, MachineCheck); SET_IVOR(2, DataStorage); SET_IVOR(3, InstructionStorage); SET_IVOR(4, ExternalInput); SET_IVOR(5, Alignment); SET_IVOR(6, Program); SET_IVOR(7, FloatingPointUnavailable); SET_IVOR(8, SystemCall); SET_IVOR(9, AuxillaryProcessorUnavailable); SET_IVOR(10, Decrementer); SET_IVOR(11, FixedIntervalTimer); SET_IVOR(12, WatchdogTimer); SET_IVOR(13, DataTLBError); SET_IVOR(14, InstructionTLBError); SET_IVOR(15, DebugCrit); /* Establish the interrupt vector base */ lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ mtspr SPRN_IVPR,r4 /* Setup the defaults for TLB entries */ li r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l mtspr SPRN_MAS4, r2 #if !defined(CONFIG_BDI_SWITCH) /* * The Abatron BDI JTAG debugger does not tolerate others * mucking with the debug registers. */ lis r2,DBCR0_IDM@h mtspr SPRN_DBCR0,r2 isync /* clear any residual debug events */ li r2,-1 mtspr SPRN_DBSR,r2 #endif #ifdef CONFIG_SMP /* Check to see if we're the second processor, and jump * to the secondary_start code if so */ LOAD_REG_ADDR_PIC(r24, boot_cpuid) lwz r24, 0(r24) cmpwi r24, -1 mfspr r24,SPRN_PIR bne __secondary_start #endif /* * This is where the main kernel code starts. */ /* ptr to current */ lis r2,init_task@h ori r2,r2,init_task@l /* ptr to current thread */ addi r4,r2,THREAD /* init task's THREAD */ mtspr SPRN_SPRG_THREAD,r4 /* stack */ lis r1,init_thread_union@h ori r1,r1,init_thread_union@l li r0,0 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) #ifdef CONFIG_SMP stw r24, TASK_CPU(r2) #endif bl early_init #ifdef CONFIG_KASAN bl kasan_early_init #endif #ifdef CONFIG_RELOCATABLE mr r3,r30 mr r4,r31 #ifdef CONFIG_PHYS_64BIT mr r5,r23 mr r6,r25 #else mr r5,r25 #endif bl relocate_init #endif #ifdef CONFIG_DYNAMIC_MEMSTART lis r3,kernstart_addr@ha la r3,kernstart_addr@l(r3) #ifdef CONFIG_PHYS_64BIT stw r23,0(r3) stw r25,4(r3) #else stw r25,0(r3) #endif #endif /* * Decide what sort of machine this is and initialize the MMU. 
*/ mr r3,r30 mr r4,r31 bl machine_init bl MMU_init /* Setup PTE pointers for the Abatron bdiGDB */ lis r6, swapper_pg_dir@h ori r6, r6, swapper_pg_dir@l lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l lis r3, kernstart_virt_addr@ha lwz r4, kernstart_virt_addr@l(r3) stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ stw r6, 0(r5) /* Let's move on */ lis r4,start_kernel@h ori r4,r4,start_kernel@l lis r3,MSR_KERNEL@h ori r3,r3,MSR_KERNEL@l mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 rfi /* change context and jump to start_kernel */ /* Macros to hide the PTE size differences * * FIND_PTE -- walks the page tables given EA & pgdir pointer * r10 -- EA of fault * r11 -- PGDIR pointer * r12 -- free * label 2: is the bailout case * * if we find the pte (fall through): * r11 is low pte word * r12 is pointer to the pte * r10 is the pshift from the PGD, if we're a hugepage */ #ifdef CONFIG_PTE_64BIT #ifdef CONFIG_HUGETLB_PAGE #define FIND_PTE \ rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ blt 1000f; /* Normal non-huge page */ \ beq 2f; /* Bail if no table */ \ oris r11, r11, PD_HUGE@h; /* Put back address bit */ \ andi. r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */ \ xor r12, r10, r11; /* drop size bits from pointer */ \ b 1001f; \ 1000: rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ li r10, 0; /* clear r10 */ \ 1001: lwz r11, 4(r12); /* Get pte entry */ #else #define FIND_PTE \ rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ beq 2f; /* Bail if no table */ \ rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ lwz r11, 4(r12); /* Get pte entry */ #endif /* HUGEPAGE */ #else /* !PTE_64BIT */ #define FIND_PTE \ rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ lwz r11, 0(r11); /* Get L1 entry */ \ rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \ beq 2f; /* Bail if no table */ \ rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \ lwz r11, 0(r12); /* Get Linux PTE */ #endif /* * Interrupt vector entry code * * The Book E MMUs are always on so we don't need to handle * interrupts in real mode as with previous PPC processors. In * this case we handle interrupts in the kernel virtual address * space. * * Interrupt vectors are dynamically placed relative to the * interrupt prefix as determined by the address of interrupt_base. * The interrupt vectors offsets are programmed using the labels * for each interrupt vector entry. * * Interrupt vectors must be aligned on a 16 byte boundary. * We align on a 32 byte cache line boundary for good measure. */ interrupt_base: /* Critical Input Interrupt */ CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception) /* Machine Check Interrupt */ MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception) /* Data Storage Interrupt */ START_EXCEPTION(DataStorage) NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE) mfspr r5,SPRN_ESR /* Grab the ESR, save it */ stw r5,_ESR(r11) mfspr r4,SPRN_DEAR /* Grab the DEAR, save it */ stw r4, _DEAR(r11) andis. 
r10,r5,(ESR_ILK|ESR_DLK)@h bne 1f prepare_transfer_to_handler bl do_page_fault b interrupt_return 1: prepare_transfer_to_handler bl CacheLockingException b interrupt_return /* Instruction Storage Interrupt */ INSTRUCTION_STORAGE_EXCEPTION /* External Input Interrupt */ EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ) /* Alignment Interrupt */ ALIGNMENT_EXCEPTION /* Program Interrupt */ PROGRAM_EXCEPTION /* Floating Point Unavailable Interrupt */ #ifdef CONFIG_PPC_FPU FP_UNAVAILABLE_EXCEPTION #else EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception) #endif /* System Call Interrupt */ START_EXCEPTION(SystemCall) SYSCALL_ENTRY 0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1 /* Auxiliary Processor Unavailable Interrupt */ EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, unknown_exception) /* Decrementer Interrupt */ DECREMENTER_EXCEPTION /* Fixed Internal Timer Interrupt */ /* TODO: Add FIT support */ EXCEPTION(0x3100, FIT, FixedIntervalTimer, unknown_exception) /* Watchdog Timer Interrupt */ #ifdef CONFIG_BOOKE_WDT CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException) #else CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception) #endif /* Data TLB Error Interrupt */ START_EXCEPTION(DataTLBError) mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ mfspr r10, SPRN_SPRG_THREAD stw r11, THREAD_NORMSAVE(0)(r10) #ifdef CONFIG_KVM_BOOKE_HV BEGIN_FTR_SECTION mfspr r11, SPRN_SRR1 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) #endif stw r12, THREAD_NORMSAVE(1)(r10) stw r13, THREAD_NORMSAVE(2)(r10) mfcr r13 stw r13, THREAD_NORMSAVE(3)(r10) DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1 START_BTB_FLUSH_SECTION mfspr r11, SPRN_SRR1 andi. r10,r11,MSR_PR beq 1f BTB_FLUSH(r10) 1: END_BTB_FLUSH_SECTION mfspr r10, SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. */ lis r11, PAGE_OFFSET@h cmplw 5, r10, r11 blt 5, 3f lis r11, swapper_pg_dir@h ori r11, r11, swapper_pg_dir@l mfspr r12,SPRN_MAS1 /* Set TID to 0 */ rlwinm r12,r12,0,16,1 mtspr SPRN_MAS1,r12 b 4f /* Get the PGD for the current thread */ 3: mfspr r11,SPRN_SPRG_THREAD lwz r11,PGDIR(r11) #ifdef CONFIG_PPC_KUAP mfspr r12, SPRN_MAS1 rlwinm. r12,r12,0,0x3fff0000 beq 2f /* KUAP fault */ #endif 4: /* Mask of required permission bits. Note that while we * do copy ESR:ST to _PAGE_RW position as trying to write * to an RO page is pretty common, we don't do it with * _PAGE_DIRTY. We could do it, but it's a fairly rare * event so I'd rather take the overhead when it happens * rather than adding an instruction here. We should measure * whether the whole thing is worth it in the first place * as we could avoid loading SPRN_ESR completely in the first * place... * * TODO: Is it worth doing that mfspr & rlwimi in the first * place or can we save a couple of instructions here ? */ mfspr r12,SPRN_ESR #ifdef CONFIG_PTE_64BIT li r13,_PAGE_PRESENT oris r13,r13,_PAGE_ACCESSED@h #else li r13,_PAGE_PRESENT|_PAGE_ACCESSED #endif rlwimi r13,r12,11,29,29 FIND_PTE andc. r13,r13,r11 /* Check permission */ #ifdef CONFIG_PTE_64BIT #ifdef CONFIG_SMP subf r13,r11,r12 /* create false data dep */ lwzx r13,r11,r13 /* Get upper pte bits */ #else lwz r13,0(r12) /* Get upper pte bits */ #endif #endif bne 2f /* Bail if permission/valid mismatch */ /* Jump to common tlb load */ b finish_tlb_load 2: /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. 
*/ mfspr r10, SPRN_SPRG_THREAD lwz r11, THREAD_NORMSAVE(3)(r10) mtcr r11 lwz r13, THREAD_NORMSAVE(2)(r10) lwz r12, THREAD_NORMSAVE(1)(r10) lwz r11, THREAD_NORMSAVE(0)(r10) mfspr r10, SPRN_SPRG_RSCRATCH0 b DataStorage /* Instruction TLB Error Interrupt */ /* * Nearly the same as above, except we get our * information from different registers and bailout * to a different point. */ START_EXCEPTION(InstructionTLBError) mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ mfspr r10, SPRN_SPRG_THREAD stw r11, THREAD_NORMSAVE(0)(r10) #ifdef CONFIG_KVM_BOOKE_HV BEGIN_FTR_SECTION mfspr r11, SPRN_SRR1 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) #endif stw r12, THREAD_NORMSAVE(1)(r10) stw r13, THREAD_NORMSAVE(2)(r10) mfcr r13 stw r13, THREAD_NORMSAVE(3)(r10) DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1 START_BTB_FLUSH_SECTION mfspr r11, SPRN_SRR1 andi. r10,r11,MSR_PR beq 1f BTB_FLUSH(r10) 1: END_BTB_FLUSH_SECTION mfspr r10, SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. */ lis r11, PAGE_OFFSET@h cmplw 5, r10, r11 blt 5, 3f lis r11, swapper_pg_dir@h ori r11, r11, swapper_pg_dir@l mfspr r12,SPRN_MAS1 /* Set TID to 0 */ rlwinm r12,r12,0,16,1 mtspr SPRN_MAS1,r12 /* Make up the required permissions for kernel code */ #ifdef CONFIG_PTE_64BIT li r13,_PAGE_PRESENT | _PAGE_BAP_SX oris r13,r13,_PAGE_ACCESSED@h #else li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC #endif b 4f /* Get the PGD for the current thread */ 3: mfspr r11,SPRN_SPRG_THREAD lwz r11,PGDIR(r11) #ifdef CONFIG_PPC_KUAP mfspr r12, SPRN_MAS1 rlwinm. r12,r12,0,0x3fff0000 beq 2f /* KUAP fault */ #endif /* Make up the required permissions for user code */ #ifdef CONFIG_PTE_64BIT li r13,_PAGE_PRESENT | _PAGE_BAP_UX oris r13,r13,_PAGE_ACCESSED@h #else li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC #endif 4: FIND_PTE andc. r13,r13,r11 /* Check permission */ #ifdef CONFIG_PTE_64BIT #ifdef CONFIG_SMP subf r13,r11,r12 /* create false data dep */ lwzx r13,r11,r13 /* Get upper pte bits */ #else lwz r13,0(r12) /* Get upper pte bits */ #endif #endif bne 2f /* Bail if permission mismatch */ /* Jump to common TLB load point */ b finish_tlb_load 2: /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. 
*/ mfspr r10, SPRN_SPRG_THREAD lwz r11, THREAD_NORMSAVE(3)(r10) mtcr r11 lwz r13, THREAD_NORMSAVE(2)(r10) lwz r12, THREAD_NORMSAVE(1)(r10) lwz r11, THREAD_NORMSAVE(0)(r10) mfspr r10, SPRN_SPRG_RSCRATCH0 b InstructionStorage /* Define SPE handlers for e500v2 */ #ifdef CONFIG_SPE /* SPE Unavailable */ START_EXCEPTION(SPEUnavailable) NORMAL_EXCEPTION_PROLOG(0x2010, SPE_UNAVAIL) beq 1f bl load_up_spe b fast_exception_return 1: prepare_transfer_to_handler bl KernelSPE b interrupt_return #elif defined(CONFIG_SPE_POSSIBLE) EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, unknown_exception) #endif /* CONFIG_SPE_POSSIBLE */ /* SPE Floating Point Data */ #ifdef CONFIG_SPE START_EXCEPTION(SPEFloatingPointData) NORMAL_EXCEPTION_PROLOG(0x2030, SPE_FP_DATA) prepare_transfer_to_handler bl SPEFloatingPointException REST_NVGPRS(r1) b interrupt_return /* SPE Floating Point Round */ START_EXCEPTION(SPEFloatingPointRound) NORMAL_EXCEPTION_PROLOG(0x2050, SPE_FP_ROUND) prepare_transfer_to_handler bl SPEFloatingPointRoundException REST_NVGPRS(r1) b interrupt_return #elif defined(CONFIG_SPE_POSSIBLE) EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, unknown_exception) EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, unknown_exception) #endif /* CONFIG_SPE_POSSIBLE */ /* Performance Monitor */ EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \ performance_monitor_exception) EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception) CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \ CriticalDoorbell, unknown_exception) /* Debug Interrupt */ DEBUG_DEBUG_EXCEPTION DEBUG_CRIT_EXCEPTION GUEST_DOORBELL_EXCEPTION CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \ unknown_exception) /* Hypercall */ EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception) /* Embedded Hypervisor Privilege */ EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception) interrupt_end: /* * Local functions */ /* * Both the instruction and data TLB miss get to this * point to load the TLB. * r10 - tsize encoding (if HUGETLB_PAGE) or available to use * r11 - TLB (info from Linux PTE) * r12 - available to use * r13 - upper bits of PTE (if PTE_64BIT) or available to use * CR5 - results of addr >= PAGE_OFFSET * MAS0, MAS1 - loaded with proper value when we get here * MAS2, MAS3 - will need additional info from Linux PTE * Upon exit, we reload everything and RFI. */ finish_tlb_load: #ifdef CONFIG_HUGETLB_PAGE cmpwi 6, r10, 0 /* check for huge page */ beq 6, finish_tlb_load_cont /* !huge */ /* Alas, we need more scratch registers for hugepages */ mfspr r12, SPRN_SPRG_THREAD stw r14, THREAD_NORMSAVE(4)(r12) stw r15, THREAD_NORMSAVE(5)(r12) stw r16, THREAD_NORMSAVE(6)(r12) stw r17, THREAD_NORMSAVE(7)(r12) /* Get the next_tlbcam_idx percpu var */ #ifdef CONFIG_SMP lwz r15, TASK_CPU-THREAD(r12) lis r14, __per_cpu_offset@h ori r14, r14, __per_cpu_offset@l rlwinm r15, r15, 2, 0, 29 lwzx r16, r14, r15 #else li r16, 0 #endif lis r17, next_tlbcam_idx@h ori r17, r17, next_tlbcam_idx@l add r17, r17, r16 /* r17 = *next_tlbcam_idx */ lwz r15, 0(r17) /* r15 = next_tlbcam_idx */ lis r14, MAS0_TLBSEL(1)@h /* select TLB1 (TLBCAM) */ rlwimi r14, r15, 16, 4, 15 /* next_tlbcam_idx entry */ mtspr SPRN_MAS0, r14 /* Extract TLB1CFG(NENTRY) */ mfspr r16, SPRN_TLB1CFG andi. r16, r16, 0xfff /* Update next_tlbcam_idx, wrapping when necessary */ addi r15, r15, 1 cmpw r15, r16 blt 100f lis r14, tlbcam_index@h ori r14, r14, tlbcam_index@l lwz r15, 0(r14) 100: stw r15, 0(r17) /* * Calc MAS1_TSIZE from r10 (which has pshift encoded) * tlb_enc = (pshift - 10). 
*/ subi r15, r10, 10 mfspr r16, SPRN_MAS1 rlwimi r16, r15, 7, 20, 24 mtspr SPRN_MAS1, r16 /* copy the pshift for use later */ mr r14, r10 /* fall through */ #endif /* CONFIG_HUGETLB_PAGE */ /* * We set execute, because we don't have the granularity to * properly set this at the page level (Linux problem). * Many of these bits are software only. Bits we don't set * here we (properly should) assume have the appropriate value. */ finish_tlb_load_cont: #ifdef CONFIG_PTE_64BIT rlwinm r12, r11, 32-2, 26, 31 /* Move in perm bits */ andi. r10, r11, _PAGE_DIRTY bne 1f li r10, MAS3_SW | MAS3_UW andc r12, r12, r10 1: rlwimi r12, r13, 20, 0, 11 /* grab RPN[32:43] */ rlwimi r12, r11, 20, 12, 19 /* grab RPN[44:51] */ 2: mtspr SPRN_MAS3, r12 BEGIN_MMU_FTR_SECTION srwi r10, r13, 12 /* grab RPN[12:31] */ mtspr SPRN_MAS7, r10 END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) #else li r10, (_PAGE_EXEC | _PAGE_PRESENT) mr r13, r11 rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */ and r12, r11, r10 andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */ slwi r10, r12, 1 or r10, r10, r12 rlwinm r10, r10, 0, ~_PAGE_EXEC /* Clear SX on user pages */ iseleq r12, r12, r10 rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */ mtspr SPRN_MAS3, r13 #endif mfspr r12, SPRN_MAS2 #ifdef CONFIG_PTE_64BIT rlwimi r12, r11, 32-19, 27, 31 /* extract WIMGE from pte */ #else rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ #endif #ifdef CONFIG_HUGETLB_PAGE beq 6, 3f /* don't mask if page isn't huge */ li r13, 1 slw r13, r13, r14 subi r13, r13, 1 rlwinm r13, r13, 0, 0, 19 /* bottom bits used for WIMGE/etc */ andc r12, r12, r13 /* mask off ea bits within the page */ #endif 3: mtspr SPRN_MAS2, r12 tlb_write_entry: tlbwe /* Done...restore registers and get out of here. */ mfspr r10, SPRN_SPRG_THREAD #ifdef CONFIG_HUGETLB_PAGE beq 6, 8f /* skip restore for 4k page faults */ lwz r14, THREAD_NORMSAVE(4)(r10) lwz r15, THREAD_NORMSAVE(5)(r10) lwz r16, THREAD_NORMSAVE(6)(r10) lwz r17, THREAD_NORMSAVE(7)(r10) #endif 8: lwz r11, THREAD_NORMSAVE(3)(r10) mtcr r11 lwz r13, THREAD_NORMSAVE(2)(r10) lwz r12, THREAD_NORMSAVE(1)(r10) lwz r11, THREAD_NORMSAVE(0)(r10) mfspr r10, SPRN_SPRG_RSCRATCH0 rfi /* Force context change */ #ifdef CONFIG_SPE /* Note that the SPE support is closely modeled after the AltiVec * support. Changes to one are likely to be applicable to the * other! */ _GLOBAL(load_up_spe) /* * Disable SPE for the task which had SPE previously, * and save its SPE registers in its thread_struct. * Enables SPE for use in the kernel on return. * On SMP we know the SPE units are free, since we give it up every * switch. -- Kumar */ mfmsr r5 oris r5,r5,MSR_SPE@h mtmsr r5 /* enable use of SPE now */ isync /* enable use of SPE after return */ oris r9,r9,MSR_SPE@h mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ li r4,1 li r10,THREAD_ACC stw r4,THREAD_USED_SPE(r5) evlddx evr4,r10,r5 evmra evr4,evr4 REST_32EVRS(0,r10,r5,THREAD_EVR0) blr /* * SPE unavailable trap from kernel - print a message, but let * the task use SPE in the kernel until it returns to user mode. */ KernelSPE: lwz r3,_MSR(r1) oris r3,r3,MSR_SPE@h stw r3,_MSR(r1) /* enable use of SPE after return */ #ifdef CONFIG_PRINTK lis r3,87f@h ori r3,r3,87f@l mr r4,r2 /* current */ lwz r5,_NIP(r1) bl _printk #endif b interrupt_return #ifdef CONFIG_PRINTK 87: .string "SPE used in kernel (task=%p, pc=%x) \n" #endif .align 4,0 #endif /* CONFIG_SPE */ /* * Translate the effective addr in r3 to phys addr.
The phys addr will be put * into r3 (higher 32 bits) and r4 (lower 32 bits) */ get_phys_addr: mfmsr r8 mfspr r9,SPRN_PID rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */ rlwimi r9,r8,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */ mtspr SPRN_MAS6,r9 tlbsx 0,r3 /* must succeed */ mfspr r8,SPRN_MAS1 mfspr r12,SPRN_MAS3 rlwinm r9,r8,25,0x1f /* r9 = log2(page size) */ li r10,1024 slw r10,r10,r9 /* r10 = page size */ addi r10,r10,-1 and r11,r3,r10 /* r11 = page offset */ andc r4,r12,r10 /* r4 = page base */ or r4,r4,r11 /* r4 = devtree phys addr */ #ifdef CONFIG_PHYS_64BIT mfspr r3,SPRN_MAS7 #endif blr /* * Global functions */ #ifdef CONFIG_PPC_E500 #ifndef CONFIG_PPC_E500MC /* Adjust or setup IVORs for e500v1/v2 */ _GLOBAL(__setup_e500_ivors) li r3,DebugCrit@l mtspr SPRN_IVOR15,r3 li r3,SPEUnavailable@l mtspr SPRN_IVOR32,r3 li r3,SPEFloatingPointData@l mtspr SPRN_IVOR33,r3 li r3,SPEFloatingPointRound@l mtspr SPRN_IVOR34,r3 li r3,PerformanceMonitor@l mtspr SPRN_IVOR35,r3 sync blr #else /* Adjust or setup IVORs for e500mc */ _GLOBAL(__setup_e500mc_ivors) li r3,DebugDebug@l mtspr SPRN_IVOR15,r3 li r3,PerformanceMonitor@l mtspr SPRN_IVOR35,r3 li r3,Doorbell@l mtspr SPRN_IVOR36,r3 li r3,CriticalDoorbell@l mtspr SPRN_IVOR37,r3 sync blr /* Adjust or setup IVORs for the embedded hypervisor */ _GLOBAL(__setup_ehv_ivors) li r3,GuestDoorbell@l mtspr SPRN_IVOR38,r3 li r3,CriticalGuestDoorbell@l mtspr SPRN_IVOR39,r3 li r3,Hypercall@l mtspr SPRN_IVOR40,r3 li r3,Ehvpriv@l mtspr SPRN_IVOR41,r3 sync blr #endif /* CONFIG_PPC_E500MC */ #endif /* CONFIG_PPC_E500 */ #ifdef CONFIG_SPE /* * extern void __giveup_spe(struct task_struct *prev) * */ _GLOBAL(__giveup_spe) addi r3,r3,THREAD /* want THREAD of task */ lwz r5,PT_REGS(r3) cmpi 0,r5,0 SAVE_32EVRS(0, r4, r3, THREAD_EVR0) evxor evr6, evr6, evr6 /* clear out evr6 */ evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ li r4,THREAD_ACC evstddx evr6, r4, r3 /* save off accumulator */ beq 1f lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) lis r3,MSR_SPE@h andc r4,r4,r3 /* disable SPE for previous task */ stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1: blr #endif /* CONFIG_SPE */ /* * extern void abort(void) * * At present, this routine just applies a system reset. */ _GLOBAL(abort) li r13,0 mtspr SPRN_DBCR0,r13 /* disable all debug events */ isync mfmsr r13 ori r13,r13,MSR_DE@l /* Enable Debug Events */ mtmsr r13 isync mfspr r13,SPRN_DBCR0 lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h mtspr SPRN_DBCR0,r13 isync #ifdef CONFIG_SMP /* When we get here, r24 needs to hold the CPU # */ .globl __secondary_start __secondary_start: LOAD_REG_ADDR_PIC(r3, tlbcam_index) lwz r3,0(r3) mtctr r3 li r26,0 /* r26 safe? */ bl switch_to_as1 mr r27,r3 /* tlb entry */ /* Load each CAM entry */ 1: mr r3,r26 bl loadcam_entry addi r26,r26,1 bdnz 1b mr r3,r27 /* tlb entry */ LOAD_REG_ADDR_PIC(r4, memstart_addr) lwz r4,0(r4) mr r5,r25 /* phys kernel start */ rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */ subf r4,r5,r4 /* memstart_addr - phys kernel start */ lis r7,KERNELBASE@h ori r7,r7,KERNELBASE@l cmpw r20,r7 /* if kernstart_virt_addr != KERNELBASE, randomized */ beq 2f li r4,0 2: li r5,0 /* no device tree */ li r6,0 /* not boot cpu */ bl restore_to_as0 lis r3,__secondary_hold_acknowledge@h ori r3,r3,__secondary_hold_acknowledge@l stw r24,0(r3) li r3,0 mr r4,r24 /* Why?
*/ bl call_setup_cpu /* get current's stack and current */ lis r2,secondary_current@ha lwz r2,secondary_current@l(r2) lwz r1,TASK_STACK(r2) /* stack */ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD li r0,0 stw r0,0(r1) /* ptr to current thread */ addi r4,r2,THREAD /* address of our thread_struct */ mtspr SPRN_SPRG_THREAD,r4 /* Setup the defaults for TLB entries */ li r4,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l mtspr SPRN_MAS4,r4 /* Jump to start_secondary */ lis r4,MSR_KERNEL@h ori r4,r4,MSR_KERNEL@l lis r3,start_secondary@h ori r3,r3,start_secondary@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 sync rfi sync .globl __secondary_hold_acknowledge __secondary_hold_acknowledge: .long -1 #endif /* * Create a 64M TLB entry from an address and entry number * r3 - entry * r4 - virtual address * r5/r6 - physical address */ _GLOBAL(create_kaslr_tlb_entry) lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ mtspr SPRN_MAS0,r7 /* Write MAS0 */ lis r3,(MAS1_VALID|MAS1_IPROT)@h ori r3,r3,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l mtspr SPRN_MAS1,r3 /* Write MAS1 */ lis r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h ori r3,r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l and r3,r3,r4 ori r3,r3,MAS2_M_IF_NEEDED@l mtspr SPRN_MAS2,r3 /* Write MAS2(EPN) */ #ifdef CONFIG_PHYS_64BIT ori r8,r6,(MAS3_SW|MAS3_SR|MAS3_SX) mtspr SPRN_MAS3,r8 /* Write MAS3(RPN) */ mtspr SPRN_MAS7,r5 #else ori r8,r5,(MAS3_SW|MAS3_SR|MAS3_SX) mtspr SPRN_MAS3,r8 /* Write MAS3(RPN) */ #endif tlbwe /* Write TLB */ isync sync blr /* * Return to the start of the relocated kernel and run again * r3 - virtual address of fdt * r4 - entry of the kernel */ _GLOBAL(reloc_kernel_entry) mfmsr r7 rlwinm r7, r7, 0, ~(MSR_IS | MSR_DS) mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r7 rfi /* * Create a tlb entry with the same effective and physical address as * the tlb entry used by the current running code. But set the TS to 1. * Then switch to address space 1. It will return with r3 set to * the ESEL of the newly created TLB. */ _GLOBAL(switch_to_as1) mflr r5 /* Find an unused entry */ mfspr r3,SPRN_TLB1CFG andi. r3,r3,0xfff mfspr r4,SPRN_PID rlwinm r4,r4,16,0x3fff0000 /* turn PID into MAS6[SPID] */ mtspr SPRN_MAS6,r4 1: lis r4,0x1000 /* Set MAS0(TLBSEL) = 1 */ addi r3,r3,-1 rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ mtspr SPRN_MAS0,r4 tlbre mfspr r4,SPRN_MAS1 andis. r4,r4,MAS1_VALID@h bne 1b /* Get the tlb entry used by the current running code */ bcl 20,31,$+4 0: mflr r4 tlbsx 0,r4 mfspr r4,SPRN_MAS1 ori r4,r4,MAS1_TS /* Set the TS = 1 */ mtspr SPRN_MAS1,r4 mfspr r4,SPRN_MAS0 rlwinm r4,r4,0,~MAS0_ESEL_MASK rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ mtspr SPRN_MAS0,r4 tlbwe isync sync mfmsr r4 ori r4,r4,MSR_IS | MSR_DS mtspr SPRN_SRR0,r5 mtspr SPRN_SRR1,r4 sync rfi /* * Restore to address space 0 and also invalidate the tlb entry created * by switch_to_as1. * r3 - the tlb entry which should be invalidated * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0) * r5 - device tree virtual address. If r4 is 0, r5 is ignored. * r6 - boot cpu */ _GLOBAL(restore_to_as0) mflr r0 bcl 20,31,$+4 0: mflr r9 addi r9,r9,1f - 0b /* * We may map the PAGE_OFFSET in AS0 to a different physical address, * so we need to calculate the right jump and device tree address based * on the offset passed by r4.
*/ add r9,r9,r4 add r5,r5,r4 add r0,r0,r4 2: mfmsr r7 li r8,(MSR_IS | MSR_DS) andc r7,r7,r8 mtspr SPRN_SRR0,r9 mtspr SPRN_SRR1,r7 sync rfi /* Invalidate the temporary tlb entry for AS1 */ 1: lis r9,0x1000 /* Set MAS0(TLBSEL) = 1 */ rlwimi r9,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ mtspr SPRN_MAS0,r9 tlbre mfspr r9,SPRN_MAS1 rlwinm r9,r9,0,2,31 /* Clear MAS1 Valid and IPROT */ mtspr SPRN_MAS1,r9 tlbwe isync cmpwi r4,0 cmpwi cr1,r6,0 cror eq,4*cr1+eq,eq bne 3f /* offset != 0 && is_boot_cpu */ mtlr r0 blr /* * The PAGE_OFFSET will map to a different physical address, * so jump to _start to do the relocation again. */ 3: mr r3,r5 bl _start
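/*
 * Editor's note (annotation, not part of the original file): the DTLB and
 * ITLB miss fast paths earlier in this file build a "required permissions"
 * mask, fetch the Linux PTE via FIND_PTE, and bail to the heavyweight
 * handler if any required bit is missing.  A minimal C sketch of that
 * check, assuming illustrative names (find_pte() is a hypothetical
 * stand-in for the FIND_PTE page-table walk; the flag macros are the real
 * ones from the pte headers):
 *
 *	unsigned long required = _PAGE_PRESENT | _PAGE_ACCESSED;
 *	if (esr & ESR_ST)		// store fault: also demand write,
 *		required |= _PAGE_RW;	// the rlwimi r13,r12,11,29,29 above
 *	pte = find_pte(ea, pgdir);	// what the FIND_PTE macro walks
 *	if (required & ~pte)		// andc. r13,r13,r11
 *		goto bailout;		// bne 2f -> do_page_fault()
 *	finish_tlb_load(pte);		// fill the MAS registers, tlbwe
 */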
aixcc-public/challenge-001-exemplar-source
2,503
arch/powerpc/kernel/reloc_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Code to process dynamic relocations in the kernel. * * Copyright 2008 Paul Mackerras, IBM Corp. */ #include <asm/ppc_asm.h> RELA = 7 RELASZ = 8 RELAENT = 9 R_PPC64_RELATIVE = 22 R_PPC64_UADDR64 = 43 /* * r3 = desired final address of kernel */ _GLOBAL(relocate) mflr r0 bcl 20,31,$+4 0: mflr r12 /* r12 has runtime addr of label 0 */ mtlr r0 ld r11,(p_dyn - 0b)(r12) add r11,r11,r12 /* r11 has runtime addr of .dynamic section */ ld r9,(p_rela - 0b)(r12) add r9,r9,r12 /* r9 has runtime addr of .rela.dyn section */ ld r10,(p_st - 0b)(r12) add r10,r10,r12 /* r10 has runtime addr of _stext */ ld r4,(p_sym - 0b)(r12) add r4,r4,r12 /* r4 has runtime addr of .dynsym */ /* * Scan the dynamic section for the RELA, RELASZ and RELAENT entries. */ li r7,0 li r8,0 .Ltags: ld r6,0(r11) /* get tag */ cmpdi r6,0 beq .Lend_of_list /* end of list */ cmpdi r6,RELA bne 2f ld r7,8(r11) /* get RELA pointer in r7 */ b 4f 2: cmpdi r6,RELASZ bne 3f ld r8,8(r11) /* get RELASZ value in r8 */ b 4f 3: cmpdi r6,RELAENT bne 4f ld r12,8(r11) /* get RELAENT value in r12 */ 4: addi r11,r11,16 b .Ltags .Lend_of_list: cmpdi r7,0 /* check we have RELA, RELASZ, RELAENT */ cmpdi cr1,r8,0 beq .Lout beq cr1,.Lout cmpdi r12,0 beq .Lout /* * Work out linktime address of _stext and hence the * relocation offset to be applied. * cur_offset [r7] = rela.run [r9] - rela.link [r7] * _stext.link [r10] = _stext.run [r10] - cur_offset [r7] * final_offset [r3] = _stext.final [r3] - _stext.link [r10] */ subf r7,r7,r9 /* cur_offset */ subf r10,r7,r10 subf r3,r10,r3 /* final_offset */ /* * Run through the list of relocations and process the * R_PPC64_RELATIVE and R_PPC64_UADDR64 ones. */ divd r8,r8,r12 /* RELASZ / RELAENT */ mtctr r8 .Lrels: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */ cmpdi r0,R_PPC64_RELATIVE bne .Luaddr64 ld r6,0(r9) /* reloc->r_offset */ ld r0,16(r9) /* reloc->r_addend */ b .Lstore .Luaddr64: srdi r5,r0,32 /* ELF64_R_SYM(reloc->r_info) */ clrldi r0,r0,32 cmpdi r0,R_PPC64_UADDR64 bne .Lnext ld r6,0(r9) ld r0,16(r9) mulli r5,r5,24 /* 24 == sizeof(elf64_sym) */ add r5,r5,r4 /* elf64_sym[ELF64_R_SYM] */ ld r5,8(r5) add r0,r0,r5 .Lstore: add r0,r0,r3 stdx r0,r7,r6 .Lnext: add r9,r9,r12 bdnz .Lrels .Lout: blr .balign 8 p_dyn: .8byte __dynamic_start - 0b p_rela: .8byte __rela_dyn_start - 0b p_sym: .8byte __dynamic_symtab - 0b p_st: .8byte _stext - 0b
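/*
 * Editor's note (annotation, not part of the original file): the loop
 * above, restated as a C sketch.  Offsets 0/8/16 are the Elf64_Rela
 * r_offset/r_info/r_addend fields, and 24 is sizeof(Elf64_Sym) with
 * st_value at offset 8, matching the loads above; the variable names are
 * illustrative:
 *
 *	long delta = stext_final - stext_link;	// r3 after the two subfs
 *	for (n = relasz / relaent; n != 0; n--, rela = (void *)rela + relaent) {
 *		switch (ELF64_R_TYPE(rela->r_info)) {
 *		case R_PPC64_RELATIVE:
 *			val = rela->r_addend;
 *			break;
 *		case R_PPC64_UADDR64:
 *			val = rela->r_addend +
 *			      dynsym[ELF64_R_SYM(rela->r_info)].st_value;
 *			break;
 *		default:
 *			continue;
 *		}
 *		*(u64 *)(cur_offset + rela->r_offset) = val + delta;
 *	}
 */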
aixcc-public/challenge-001-exemplar-source
32,819
arch/powerpc/kernel/head_44x.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Kernel execution entry point code. * * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> * Initial PowerPC version. * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> * Rewritten for PReP * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> * Low-level exception handlers, MMU support, and rewrite. * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> * PowerPC 8xx modifications. * Copyright (c) 1998-1999 TiVo, Inc. * PowerPC 403GCX modifications. * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> * PowerPC 403GCX/405GP modifications. * Copyright 2000 MontaVista Software Inc. * PPC405 modifications * PowerPC 403GCX/405GP modifications. * Author: MontaVista Software, Inc. * frank_rowand@mvista.com or source@mvista.com * debbie_chu@mvista.com * Copyright 2002-2005 MontaVista Software, Inc. * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> */ #include <linux/init.h> #include <linux/pgtable.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/synch.h> #include <asm/export.h> #include <asm/code-patching-asm.h> #include "head_booke.h" /* As with the other PowerPC ports, it is expected that when code * execution begins here, the following registers contain valid, yet * optional, information: * * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) * r4 - Starting address of the init RAM disk * r5 - Ending address of the init RAM disk * r6 - Start of kernel command line string (e.g. "mem=128") * r7 - End of kernel command line string * */ __HEAD _GLOBAL(_stext); _GLOBAL(_start); /* * Reserve a word at a fixed location to store the address * of abatron_pteptrs */ nop mr r31,r3 /* save device tree ptr */ li r24,0 /* CPU number */ #ifdef CONFIG_RELOCATABLE /* * Relocate ourselves to the current runtime address. * This is called only by the Boot CPU. * "relocate" is called with our current runtime virtual * address. * r21 will be loaded with the physical runtime address of _stext */ bcl 20,31,$+4 /* Get our runtime address */ 0: mflr r21 /* Make it accessible */ addis r21,r21,(_stext - 0b)@ha addi r21,r21,(_stext - 0b)@l /* Get our current runtime base */ /* * We have the runtime (virtual) address of our base. * We calculate our offset within the enclosing 256M page. * We could map the 256M page we belong to at PAGE_OFFSET and * get going from there. */ lis r4,KERNELBASE@h ori r4,r4,KERNELBASE@l rlwinm r6,r21,0,4,31 /* r6 = PHYS_START % 256M */ rlwinm r5,r4,0,4,31 /* r5 = KERNELBASE % 256M */ subf r3,r5,r6 /* r3 = r6 - r5 */ add r3,r4,r3 /* Required Virtual Address */ bl relocate #endif bl init_cpu_state /* * This is where the main kernel code starts. */ /* ptr to current */ lis r2,init_task@h ori r2,r2,init_task@l /* ptr to current thread */ addi r4,r2,THREAD /* init task's THREAD */ mtspr SPRN_SPRG_THREAD,r4 /* stack */ lis r1,init_thread_union@h ori r1,r1,init_thread_union@l li r0,0 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) bl early_init #ifdef CONFIG_RELOCATABLE /* * Relocatable kernel support based on processing of dynamic * relocation entries. * * r25 will contain RPN/ERPN for the start address of memory * r21 will contain the current offset of _stext */ lis r3,kernstart_addr@ha la r3,kernstart_addr@l(r3) /* * Compute the kernstart_addr. * kernstart_addr => (r6,r8) * kernstart_addr & ~0xfffffff => (r6,r7) */ rlwinm r6,r25,0,28,31 /* ERPN.
Bits 32-35 of Address */ rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */ rlwinm r8,r21,0,4,31 /* r8 = (_stext & 0xfffffff) */ or r8,r7,r8 /* Compute the lower 32bit of kernstart_addr */ /* Store kernstart_addr */ stw r6,0(r3) /* higher 32bit */ stw r8,4(r3) /* lower 32bit */ /* * Compute the virt_phys_offset : * virt_phys_offset = stext.run - kernstart_addr * * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff) * When we relocate, we have : * * (kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff) * * hence: * virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff) * */ /* KERNELBASE&~0xfffffff => (r4,r5) */ li r4, 0 /* higher 32bit */ lis r5,KERNELBASE@h rlwinm r5,r5,0,0,3 /* Align to 256M, lower 32bit */ /* * 64bit subtraction. */ subfc r5,r7,r5 subfe r4,r6,r4 /* Store virt_phys_offset */ lis r3,virt_phys_offset@ha la r3,virt_phys_offset@l(r3) stw r4,0(r3) stw r5,4(r3) #elif defined(CONFIG_DYNAMIC_MEMSTART) /* * Mapping based, page aligned dynamic kernel loading. * * r25 will contain RPN/ERPN for the start address of memory * * Add the difference between KERNELBASE and PAGE_OFFSET to the * start of physical memory to get kernstart_addr. */ lis r3,kernstart_addr@ha la r3,kernstart_addr@l(r3) lis r4,KERNELBASE@h ori r4,r4,KERNELBASE@l lis r5,PAGE_OFFSET@h ori r5,r5,PAGE_OFFSET@l subf r4,r5,r4 rlwinm r6,r25,0,28,31 /* ERPN */ rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */ add r7,r7,r4 stw r6,0(r3) stw r7,4(r3) #endif /* * Decide what sort of machine this is and initialize the MMU. */ #ifdef CONFIG_KASAN bl kasan_early_init #endif li r3,0 mr r4,r31 bl machine_init bl MMU_init /* Setup PTE pointers for the Abatron bdiGDB */ lis r6, swapper_pg_dir@h ori r6, r6, swapper_pg_dir@l lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l lis r4, KERNELBASE@h ori r4, r4, KERNELBASE@l stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ stw r6, 0(r5) /* Clear the Machine Check Syndrome Register */ li r0,0 mtspr SPRN_MCSR,r0 /* Let's move on */ lis r4,start_kernel@h ori r4,r4,start_kernel@l lis r3,MSR_KERNEL@h ori r3,r3,MSR_KERNEL@l mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 rfi /* change context and jump to start_kernel */ /* * Interrupt vector entry code * * The Book E MMUs are always on so we don't need to handle * interrupts in real mode as with previous PPC processors. In * this case we handle interrupts in the kernel virtual address * space. * * Interrupt vectors are dynamically placed relative to the * interrupt prefix as determined by the address of interrupt_base. * The interrupt vector offsets are programmed using the labels * for each interrupt vector entry. * * Interrupt vectors must be aligned on a 16 byte boundary. * We align on a 32 byte cache line boundary for good measure.
*/ interrupt_base: /* Critical Input Interrupt */ CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception) /* Machine Check Interrupt */ CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \ machine_check_exception) MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception) /* Data Storage Interrupt */ DATA_STORAGE_EXCEPTION /* Instruction Storage Interrupt */ INSTRUCTION_STORAGE_EXCEPTION /* External Input Interrupt */ EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, do_IRQ) /* Alignment Interrupt */ ALIGNMENT_EXCEPTION /* Program Interrupt */ PROGRAM_EXCEPTION /* Floating Point Unavailable Interrupt */ #ifdef CONFIG_PPC_FPU FP_UNAVAILABLE_EXCEPTION #else EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \ FloatingPointUnavailable, unknown_exception) #endif /* System Call Interrupt */ START_EXCEPTION(SystemCall) SYSCALL_ENTRY 0xc00 BOOKE_INTERRUPT_SYSCALL /* Auxiliary Processor Unavailable Interrupt */ EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \ AuxillaryProcessorUnavailable, unknown_exception) /* Decrementer Interrupt */ DECREMENTER_EXCEPTION /* Fixed Internal Timer Interrupt */ /* TODO: Add FIT support */ EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, unknown_exception) /* Watchdog Timer Interrupt */ /* TODO: Add watchdog support */ #ifdef CONFIG_BOOKE_WDT CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException) #else CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception) #endif /* Data TLB Error Interrupt */ START_EXCEPTION(DataTLBError44x) mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ mtspr SPRN_SPRG_WSCRATCH1, r11 mtspr SPRN_SPRG_WSCRATCH2, r12 mtspr SPRN_SPRG_WSCRATCH3, r13 mfcr r11 mtspr SPRN_SPRG_WSCRATCH4, r11 mfspr r10, SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. */ lis r11, PAGE_OFFSET@h cmplw r10, r11 blt+ 3f lis r11, swapper_pg_dir@h ori r11, r11, swapper_pg_dir@l mfspr r12,SPRN_MMUCR rlwinm r12,r12,0,0,23 /* Clear TID */ b 4f /* Get the PGD for the current thread */ 3: mfspr r11,SPRN_SPRG_THREAD lwz r11,PGDIR(r11) /* Load PID into MMUCR TID */ mfspr r12,SPRN_MMUCR mfspr r13,SPRN_PID /* Get PID */ rlwimi r12,r13,0,24,31 /* Set TID */ #ifdef CONFIG_PPC_KUAP cmpwi r13,0 beq 2f /* KUAP Fault */ #endif 4: mtspr SPRN_MMUCR,r12 /* Mask of required permission bits. Note that while we * do copy ESR:ST to _PAGE_RW position as trying to write * to an RO page is pretty common, we don't do it with * _PAGE_DIRTY. We could do it, but it's a fairly rare * event so I'd rather take the overhead when it happens * rather than adding an instruction here. We should measure * whether the whole thing is worth it in the first place * as we could avoid loading SPRN_ESR completely in the first * place... * * TODO: Is it worth doing that mfspr & rlwimi in the first * place or can we save a couple of instructions here ? */ mfspr r12,SPRN_ESR li r13,_PAGE_PRESENT|_PAGE_ACCESSED rlwimi r13,r12,10,30,30 /* Load the PTE */ /* Compute pgdir/pmd offset */ rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29 lwzx r11, r12, r11 /* Get pgd/pmd entry */ rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ beq 2f /* Bail if no table */ /* Compute pte address */ rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28 lwz r11, 0(r12) /* Get high word of pte entry */ lwz r12, 4(r12) /* Get low word of pte entry */ lis r10,tlb_44x_index@ha andc. 
r13,r13,r12 /* Check permission */ /* Load the next available TLB index */ lwz r13,tlb_44x_index@l(r10) bne 2f /* Bail if permission mismatch */ /* Increment, rollover, and store TLB index */ addi r13,r13,1 patch_site 0f, patch__tlb_44x_hwater_D /* Compare with watermark (instruction gets patched) */ 0: cmpwi 0,r13,1 /* reserve entries */ ble 5f li r13,0 5: /* Store the next available TLB index */ stw r13,tlb_44x_index@l(r10) /* Re-load the faulting address */ mfspr r10,SPRN_DEAR /* Jump to common tlb load */ b finish_tlb_load_44x 2: /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ mfspr r11, SPRN_SPRG_RSCRATCH4 mtcr r11 mfspr r13, SPRN_SPRG_RSCRATCH3 mfspr r12, SPRN_SPRG_RSCRATCH2 mfspr r11, SPRN_SPRG_RSCRATCH1 mfspr r10, SPRN_SPRG_RSCRATCH0 b DataStorage /* Instruction TLB Error Interrupt */ /* * Nearly the same as above, except we get our * information from different registers and bailout * to a different point. */ START_EXCEPTION(InstructionTLBError44x) mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ mtspr SPRN_SPRG_WSCRATCH1, r11 mtspr SPRN_SPRG_WSCRATCH2, r12 mtspr SPRN_SPRG_WSCRATCH3, r13 mfcr r11 mtspr SPRN_SPRG_WSCRATCH4, r11 mfspr r10, SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. */ lis r11, PAGE_OFFSET@h cmplw r10, r11 blt+ 3f lis r11, swapper_pg_dir@h ori r11, r11, swapper_pg_dir@l mfspr r12,SPRN_MMUCR rlwinm r12,r12,0,0,23 /* Clear TID */ b 4f /* Get the PGD for the current thread */ 3: mfspr r11,SPRN_SPRG_THREAD lwz r11,PGDIR(r11) /* Load PID into MMUCR TID */ mfspr r12,SPRN_MMUCR mfspr r13,SPRN_PID /* Get PID */ rlwimi r12,r13,0,24,31 /* Set TID */ #ifdef CONFIG_PPC_KUAP cmpwi r13,0 beq 2f /* KUAP Fault */ #endif 4: mtspr SPRN_MMUCR,r12 /* Make up the required permissions */ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC /* Compute pgdir/pmd offset */ rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29 lwzx r11, r12, r11 /* Get pgd/pmd entry */ rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ beq 2f /* Bail if no table */ /* Compute pte address */ rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28 lwz r11, 0(r12) /* Get high word of pte entry */ lwz r12, 4(r12) /* Get low word of pte entry */ lis r10,tlb_44x_index@ha andc. r13,r13,r12 /* Check permission */ /* Load the next available TLB index */ lwz r13,tlb_44x_index@l(r10) bne 2f /* Bail if permission mismatch */ /* Increment, rollover, and store TLB index */ addi r13,r13,1 patch_site 0f, patch__tlb_44x_hwater_I /* Compare with watermark (instruction gets patched) */ 0: cmpwi 0,r13,1 /* reserve entries */ ble 5f li r13,0 5: /* Store the next available TLB index */ stw r13,tlb_44x_index@l(r10) /* Re-load the faulting address */ mfspr r10,SPRN_SRR0 /* Jump to common TLB load point */ b finish_tlb_load_44x 2: /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ mfspr r11, SPRN_SPRG_RSCRATCH4 mtcr r11 mfspr r13, SPRN_SPRG_RSCRATCH3 mfspr r12, SPRN_SPRG_RSCRATCH2 mfspr r11, SPRN_SPRG_RSCRATCH1 mfspr r10, SPRN_SPRG_RSCRATCH0 b InstructionStorage /* * Both the instruction and data TLB miss get to this * point to load the TLB. * r10 - EA of fault * r11 - PTE high word value * r12 - PTE low word value * r13 - TLB index * MMUCR - loaded with proper value when we get here * Upon exit, we reload everything and RFI. 
*/ finish_tlb_load_44x: /* Combine RPN & ERPN and write WS 0 */ rlwimi r11,r12,0,0,31-PAGE_SHIFT tlbwe r11,r13,PPC44x_TLB_XLAT /* * Create WS1. This is the faulting address (EPN), * page size, and valid flag. */ li r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE /* Insert valid and page size */ rlwimi r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31 tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */ /* And WS 2 */ li r10,0xf85 /* Mask to apply from PTE */ rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */ and r11,r12,r10 /* Mask PTE bits to keep */ andi. r10,r12,_PAGE_USER /* User page ? */ beq 1f /* nope, leave U bits empty */ rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */ rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */ 1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */ /* Done...restore registers and get out of here. */ mfspr r11, SPRN_SPRG_RSCRATCH4 mtcr r11 mfspr r13, SPRN_SPRG_RSCRATCH3 mfspr r12, SPRN_SPRG_RSCRATCH2 mfspr r11, SPRN_SPRG_RSCRATCH1 mfspr r10, SPRN_SPRG_RSCRATCH0 rfi /* Force context change */ /* TLB error interrupts for 476 */ #ifdef CONFIG_PPC_47x START_EXCEPTION(DataTLBError47x) mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */ mtspr SPRN_SPRG_WSCRATCH1,r11 mtspr SPRN_SPRG_WSCRATCH2,r12 mtspr SPRN_SPRG_WSCRATCH3,r13 mfcr r11 mtspr SPRN_SPRG_WSCRATCH4,r11 mfspr r10,SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. */ lis r11,PAGE_OFFSET@h cmplw cr0,r10,r11 blt+ 3f lis r11,swapper_pg_dir@h ori r11,r11, swapper_pg_dir@l li r12,0 /* MMUCR = 0 */ b 4f /* Get the PGD for the current thread and setup MMUCR */ 3: mfspr r11,SPRN_SPRG3 lwz r11,PGDIR(r11) mfspr r12,SPRN_PID /* Get PID */ #ifdef CONFIG_PPC_KUAP cmpwi r12,0 beq 2f /* KUAP Fault */ #endif 4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */ /* Mask of required permission bits. Note that while we * do copy ESR:ST to _PAGE_RW position as trying to write * to an RO page is pretty common, we don't do it with * _PAGE_DIRTY. We could do it, but it's a fairly rare * event so I'd rather take the overhead when it happens * rather than adding an instruction here. We should measure * whether the whole thing is worth it in the first place * as we could avoid loading SPRN_ESR completely in the first * place... * * TODO: Is it worth doing that mfspr & rlwimi in the first * place or can we save a couple of instructions here ? */ mfspr r12,SPRN_ESR li r13,_PAGE_PRESENT|_PAGE_ACCESSED rlwimi r13,r12,10,30,30 /* Load the PTE */ /* Compute pgdir/pmd offset */ rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29 lwzx r11,r12,r11 /* Get pgd/pmd entry */ /* Word 0 is EPN,V,TS,DSIZ */ li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size */ li r12,0 tlbwe r10,r12,0 /* XXX can we do better ? Need to make sure tlbwe has established * latch V bit in MMUCR0 before the PTE is loaded further down */ #ifdef CONFIG_SMP isync #endif rlwinm. r12,r11,0,0,20 /* Extract pt base address */ /* Compute pte address */ rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28 beq 2f /* Bail if no table */ lwz r11,0(r12) /* Get high word of pte entry */ /* XXX can we do better ? maybe insert a known 0 bit from r11 into the * bottom of r12 to create a data dependency... We can also use r10 * as destination nowadays */ #ifdef CONFIG_SMP lwsync #endif lwz r12,4(r12) /* Get low word of pte entry */ andc. r13,r13,r12 /* Check permission */ /* Jump to common tlb load */ beq finish_tlb_load_47x 2: /* The bailout.
Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ mfspr r11,SPRN_SPRG_RSCRATCH4 mtcr r11 mfspr r13,SPRN_SPRG_RSCRATCH3 mfspr r12,SPRN_SPRG_RSCRATCH2 mfspr r11,SPRN_SPRG_RSCRATCH1 mfspr r10,SPRN_SPRG_RSCRATCH0 b DataStorage /* Instruction TLB Error Interrupt */ /* * Nearly the same as above, except we get our * information from different registers and bailout * to a different point. */ START_EXCEPTION(InstructionTLBError47x) mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */ mtspr SPRN_SPRG_WSCRATCH1,r11 mtspr SPRN_SPRG_WSCRATCH2,r12 mtspr SPRN_SPRG_WSCRATCH3,r13 mfcr r11 mtspr SPRN_SPRG_WSCRATCH4,r11 mfspr r10,SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. */ lis r11,PAGE_OFFSET@h cmplw cr0,r10,r11 blt+ 3f lis r11,swapper_pg_dir@h ori r11,r11, swapper_pg_dir@l li r12,0 /* MMUCR = 0 */ b 4f /* Get the PGD for the current thread and setup MMUCR */ 3: mfspr r11,SPRN_SPRG_THREAD lwz r11,PGDIR(r11) mfspr r12,SPRN_PID /* Get PID */ #ifdef CONFIG_PPC_KUAP cmpwi r12,0 beq 2f /* KUAP Fault */ #endif 4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */ /* Make up the required permissions */ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC /* Load PTE */ /* Compute pgdir/pmd offset */ rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29 lwzx r11,r12,r11 /* Get pgd/pmd entry */ /* Word 0 is EPN,V,TS,DSIZ */ li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size */ li r12,0 tlbwe r10,r12,0 /* XXX can we do better ? Need to make sure tlbwe has established * latch V bit in MMUCR0 before the PTE is loaded further down */ #ifdef CONFIG_SMP isync #endif rlwinm. r12,r11,0,0,20 /* Extract pt base address */ /* Compute pte address */ rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28 beq 2f /* Bail if no table */ lwz r11,0(r12) /* Get high word of pte entry */ /* XXX can we do better ? maybe insert a known 0 bit from r11 into the * bottom of r12 to create a data dependency... We can also use r10 * as destination nowadays */ #ifdef CONFIG_SMP lwsync #endif lwz r12,4(r12) /* Get low word of pte entry */ andc. r13,r13,r12 /* Check permission */ /* Jump to common TLB load point */ beq finish_tlb_load_47x 2: /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ mfspr r11, SPRN_SPRG_RSCRATCH4 mtcr r11 mfspr r13, SPRN_SPRG_RSCRATCH3 mfspr r12, SPRN_SPRG_RSCRATCH2 mfspr r11, SPRN_SPRG_RSCRATCH1 mfspr r10, SPRN_SPRG_RSCRATCH0 b InstructionStorage /* * Both the instruction and data TLB miss get to this * point to load the TLB. * r10 - free to use * r11 - PTE high word value * r12 - PTE low word value * r13 - free to use * MMUCR - loaded with proper value when we get here * Upon exit, we reload everything and RFI. */ finish_tlb_load_47x: /* Combine RPN & ERPN and write WS 1 */ rlwimi r11,r12,0,0,31-PAGE_SHIFT tlbwe r11,r13,1 /* And make up word 2 */ li r10,0xf85 /* Mask to apply from PTE */ rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */ and r11,r12,r10 /* Mask PTE bits to keep */ andi. r10,r12,_PAGE_USER /* User page ? */ beq 1f /* nope, leave U bits empty */ rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */ rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */ 1: tlbwe r11,r13,2 /* Done...restore registers and get out of here.
*/ mfspr r11, SPRN_SPRG_RSCRATCH4 mtcr r11 mfspr r13, SPRN_SPRG_RSCRATCH3 mfspr r12, SPRN_SPRG_RSCRATCH2 mfspr r11, SPRN_SPRG_RSCRATCH1 mfspr r10, SPRN_SPRG_RSCRATCH0 rfi #endif /* CONFIG_PPC_47x */ /* Debug Interrupt */ /* * This statement needs to exist at the end of the IVPR * definition just in case you end up taking a debug * exception within another exception. */ DEBUG_CRIT_EXCEPTION interrupt_end: /* * Global functions */ /* * Adjust the machine check IVOR on 440A cores */ _GLOBAL(__fixup_440A_mcheck) li r3,MachineCheckA@l mtspr SPRN_IVOR1,r3 sync blr /* * Init CPU state. This is called at boot time or for secondary CPUs * to setup initial TLB entries, setup IVORs, etc... * */ _GLOBAL(init_cpu_state) mflr r22 #ifdef CONFIG_PPC_47x /* We use the PVR to differentiate 44x cores from 476 */ mfspr r3,SPRN_PVR srwi r3,r3,16 cmplwi cr0,r3,PVR_476FPE@h beq head_start_47x cmplwi cr0,r3,PVR_476@h beq head_start_47x cmplwi cr0,r3,PVR_476_ISS@h beq head_start_47x #endif /* CONFIG_PPC_47x */ /* * In case the firmware didn't do it, we apply some workarounds * that are good for all 440 core variants here */ mfspr r3,SPRN_CCR0 rlwinm r3,r3,0,0,27 /* disable icache prefetch */ isync mtspr SPRN_CCR0,r3 isync sync /* * Set up the initial MMU state for 44x * * We are still executing code at the virtual address * mappings set by the firmware for the base of RAM. * * We first invalidate all TLB entries but the one * we are running from. We then load the KERNELBASE * mappings so we can begin to use kernel addresses * natively and so the interrupt vector locations are * permanently pinned (necessary since Book E * implementations always have translation enabled). * * TODO: Use the known TLB entry we are running from to * determine which physical region we are located * in. This can be used to determine where in RAM * (on a shared CPU system) or PCI memory space * (on a DRAMless system) we are located. * For now, we assume a perfect world which means * we are located at the base of DRAM (physical 0). */ /* * Search TLB for entry that we are currently using. * Invalidate all entries but the one we are using. */ /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ mfspr r3,SPRN_PID /* Get PID */ mfmsr r4 /* Get MSR */ andi. r4,r4,MSR_IS@l /* TS=1? */ beq wmmucr /* If not, leave STS=0 */ oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */ wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ sync bcl 20,31,$+4 /* Find our address */ invstr: mflr r5 /* Make it accessible */ tlbsx r23,0,r5 /* Find entry we are in */ li r4,0 /* Start at TLB entry 0 */ li r3,0 /* Set PAGEID inval value */ 1: cmpw r23,r4 /* Is this our entry? */ beq skpinv /* If so, skip the inval */ tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ skpinv: addi r4,r4,1 /* Increment */ cmpwi r4,64 /* Are we done? */ bne 1b /* If not, repeat */ isync /* If so, context change */ /* * Configure and load pinned entry into TLB slot 63. */ #ifdef CONFIG_NONSTATIC_KERNEL /* * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT * entries of the initial mapping set by the boot loader. 
* The XLAT entry is stored in r25 */ /* Read the XLAT entry for our current mapping */ tlbre r25,r23,PPC44x_TLB_XLAT lis r3,KERNELBASE@h ori r3,r3,KERNELBASE@l /* Use our current RPN entry */ mr r4,r25 #else lis r3,PAGE_OFFSET@h ori r3,r3,PAGE_OFFSET@l /* Kernel is at the base of RAM */ li r4, 0 /* Load the kernel physical address */ #endif /* Load the kernel PID = 0 */ li r0,0 mtspr SPRN_PID,r0 sync /* Initialize MMUCR */ li r5,0 mtspr SPRN_MMUCR,r5 sync /* pageid fields */ clrrwi r3,r3,10 /* Mask off the effective page number */ ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M /* xlat fields */ clrrwi r4,r4,10 /* Mask off the real page number */ /* ERPN is 0 for first 4GB page */ /* attrib fields */ /* Added guarded bit to protect against speculative loads/stores */ li r5,0 ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) li r0,63 /* TLB slot 63 */ tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ /* Force context change */ mfmsr r0 mtspr SPRN_SRR1, r0 lis r0,3f@h ori r0,r0,3f@l mtspr SPRN_SRR0,r0 sync rfi /* If necessary, invalidate original entry we used */ 3: cmpwi r23,63 beq 4f li r6,0 tlbwe r6,r23,PPC44x_TLB_PAGEID isync 4: #ifdef CONFIG_PPC_EARLY_DEBUG_44x /* Add UART mapping for early debug. */ /* pageid fields */ lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h ori r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K /* xlat fields */ lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH /* attrib fields */ li r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G) li r0,62 /* TLB slot 62 */ tlbwe r3,r0,PPC44x_TLB_PAGEID tlbwe r4,r0,PPC44x_TLB_XLAT tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Force context change */ isync #endif /* CONFIG_PPC_EARLY_DEBUG_44x */ /* Establish the interrupt vector offsets */ SET_IVOR(0, CriticalInput); SET_IVOR(1, MachineCheck); SET_IVOR(2, DataStorage); SET_IVOR(3, InstructionStorage); SET_IVOR(4, ExternalInput); SET_IVOR(5, Alignment); SET_IVOR(6, Program); SET_IVOR(7, FloatingPointUnavailable); SET_IVOR(8, SystemCall); SET_IVOR(9, AuxillaryProcessorUnavailable); SET_IVOR(10, Decrementer); SET_IVOR(11, FixedIntervalTimer); SET_IVOR(12, WatchdogTimer); SET_IVOR(13, DataTLBError44x); SET_IVOR(14, InstructionTLBError44x); SET_IVOR(15, DebugCrit); b head_start_common #ifdef CONFIG_PPC_47x #ifdef CONFIG_SMP /* Entry point for secondary 47x processors */ _GLOBAL(start_secondary_47x) mr r24,r3 /* CPU number */ bl init_cpu_state /* Now we need to bolt the rest of kernel memory which * is done in C code. We must be careful because our task * struct or our stack can (and will probably) be out * of reach of the initial 256M TLB entry, so we use a * small temporary stack in .bss for that.
This works * because only one CPU at a time can be in this code */ lis r1,temp_boot_stack@h ori r1,r1,temp_boot_stack@l addi r1,r1,1024-STACK_FRAME_OVERHEAD li r0,0 stw r0,0(r1) bl mmu_init_secondary /* Now we can get our task struct and real stack pointer */ /* Get current's stack and current */ lis r2,secondary_current@ha lwz r2,secondary_current@l(r2) lwz r1,TASK_STACK(r2) /* Current stack pointer */ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD li r0,0 stw r0,0(r1) /* Kernel stack for exception entry in SPRG3 */ addi r4,r2,THREAD /* init task's THREAD */ mtspr SPRN_SPRG3,r4 b start_secondary #endif /* CONFIG_SMP */ /* * Set up the initial MMU state for 44x * * We are still executing code at the virtual address * mappings set by the firmware for the base of RAM. */ head_start_47x: /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ mfspr r3,SPRN_PID /* Get PID */ mfmsr r4 /* Get MSR */ andi. r4,r4,MSR_IS@l /* TS=1? */ beq 1f /* If not, leave STS=0 */ oris r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */ 1: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ sync /* Find the entry we are running from */ bcl 20,31,$+4 1: mflr r23 tlbsx r23,0,r23 tlbre r24,r23,0 tlbre r25,r23,1 tlbre r26,r23,2 /* * Cleanup time */ /* Initialize MMUCR */ li r5,0 mtspr SPRN_MMUCR,r5 sync clear_all_utlb_entries: #; Set initial values. addis r3,0,0x8000 addi r4,0,0 addi r5,0,0 b clear_utlb_entry #; Align the loop to speed things up. .align 6 clear_utlb_entry: tlbwe r4,r3,0 tlbwe r5,r3,1 tlbwe r5,r3,2 addis r3,r3,0x2000 cmpwi r3,0 bne clear_utlb_entry addis r3,0,0x8000 addis r4,r4,0x100 cmpwi r4,0 bne clear_utlb_entry #; Restore original entry. oris r23,r23,0x8000 /* specify the way */ tlbwe r24,r23,0 tlbwe r25,r23,1 tlbwe r26,r23,2 /* * Configure and load pinned entry into TLB for the kernel core */ lis r3,PAGE_OFFSET@h ori r3,r3,PAGE_OFFSET@l /* Load the kernel PID = 0 */ li r0,0 mtspr SPRN_PID,r0 sync /* Word 0 */ clrrwi r3,r3,12 /* Mask off the effective page number */ ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M /* Word 1 - use r25. RPN is the same as the original entry */ /* Word 2 */ li r5,0 ori r5,r5,PPC47x_TLB2_S_RWX #ifdef CONFIG_SMP ori r5,r5,PPC47x_TLB2_M #endif /* We write to way 0 and bolted 0 */ lis r0,0x8800 tlbwe r3,r0,0 tlbwe r25,r0,1 tlbwe r5,r0,2 /* * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix * them up later */ LOAD_REG_IMMEDIATE(r3, 0x9abcdef0) mtspr SPRN_SSPCR,r3 mtspr SPRN_USPCR,r3 LOAD_REG_IMMEDIATE(r3, 0x12345670) mtspr SPRN_ISPCR,r3 /* Force context change */ mfmsr r0 mtspr SPRN_SRR1, r0 lis r0,3f@h ori r0,r0,3f@l mtspr SPRN_SRR0,r0 sync rfi /* Invalidate original entry we used */ 3: rlwinm r24,r24,0,21,19 /* clear the "valid" bit */ tlbwe r24,r23,0 addi r24,0,0 tlbwe r24,r23,1 tlbwe r24,r23,2 isync /* Clear out the shadow TLB entries */ #ifdef CONFIG_PPC_EARLY_DEBUG_44x /* Add UART mapping for early debug. 
*/ /* Word 0 */ lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M /* Word 1 */ lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH /* Word 2 */ li r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG) /* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same * congruence class as the kernel, we need to make sure of it at * some point */ lis r0,0x8d00 tlbwe r3,r0,0 tlbwe r4,r0,1 tlbwe r5,r0,2 /* Force context change */ isync #endif /* CONFIG_PPC_EARLY_DEBUG_44x */ /* Establish the interrupt vector offsets */ SET_IVOR(0, CriticalInput); SET_IVOR(1, MachineCheckA); SET_IVOR(2, DataStorage); SET_IVOR(3, InstructionStorage); SET_IVOR(4, ExternalInput); SET_IVOR(5, Alignment); SET_IVOR(6, Program); SET_IVOR(7, FloatingPointUnavailable); SET_IVOR(8, SystemCall); SET_IVOR(9, AuxillaryProcessorUnavailable); SET_IVOR(10, Decrementer); SET_IVOR(11, FixedIntervalTimer); SET_IVOR(12, WatchdogTimer); SET_IVOR(13, DataTLBError47x); SET_IVOR(14, InstructionTLBError47x); SET_IVOR(15, DebugCrit); /* We configure icbi to invalidate 128 bytes at a time since the * current 32-bit kernel code isn't too happy with icache != dcache * block size. We also disable the BTAC as this can cause errors * in some circumstances (see IBM Erratum 47). */ mfspr r3,SPRN_CCR0 oris r3,r3,0x0020 ori r3,r3,0x0040 mtspr SPRN_CCR0,r3 isync #endif /* CONFIG_PPC_47x */ /* * Here we are back to code that is common between 44x and 47x * * We proceed to further kernel initialization and return to the * main kernel entry */ head_start_common: /* Establish the interrupt vector base */ lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ mtspr SPRN_IVPR,r4 /* * If the kernel was loaded at a non-zero 256 MB page, we need to * mask off the most significant 4 bits to get the relative address * from the start of physical memory */ rlwinm r22,r22,0,4,31 addis r22,r22,PAGE_OFFSET@h mtlr r22 isync blr #ifdef CONFIG_SMP .data .align 12 temp_boot_stack: .space 1024 #endif /* CONFIG_SMP */
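/*
 * Editor's note (annotation, not part of the original file): the
 * CONFIG_RELOCATABLE arithmetic near the top of this file, as a C
 * sketch.  kernstart_addr and virt_phys_offset are the real symbols;
 * the local names are illustrative:
 *
 *	u64 kernstart_addr = ((u64)erpn << 32)		// r6: bits 32-35
 *		| rpn_256M				// r7: 256M-aligned RPN
 *		| (stext_phys & 0x0fffffff);		// r8: offset in page
 *	u64 virt_phys_offset = (u64)(KERNELBASE & ~0x0fffffffUL)
 *		- (kernstart_addr & ~0x0fffffffULL);
 *
 * The subfc/subfe pair is this 64-bit subtraction performed in two
 * 32-bit halves with borrow propagation.
 */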
aixcc-public/challenge-001-exemplar-source
5,509
arch/powerpc/kernel/85xx_entry_mapping.S
/* SPDX-License-Identifier: GPL-2.0 */ /* 1. Find the index of the entry we're executing in */ bcl 20,31,$+4 /* Find our address */ invstr: mflr r6 /* Make it accessible */ mfmsr r7 rlwinm r4,r7,27,31,31 /* extract MSR[IS] */ mfspr r7, SPRN_PID0 slwi r7,r7,16 or r7,r7,r4 mtspr SPRN_MAS6,r7 tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */ mfspr r7,SPRN_MAS1 andis. r7,r7,MAS1_VALID@h bne match_TLB mfspr r7,SPRN_MMUCFG rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */ cmpwi r7,3 bne match_TLB /* skip if NPIDS != 3 */ mfspr r7,SPRN_PID1 slwi r7,r7,16 or r7,r7,r4 mtspr SPRN_MAS6,r7 tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */ mfspr r7,SPRN_MAS1 andis. r7,r7,MAS1_VALID@h bne match_TLB mfspr r7, SPRN_PID2 slwi r7,r7,16 or r7,r7,r4 mtspr SPRN_MAS6,r7 tlbsx 0,r6 /* Fall through, we had to match */ match_TLB: mfspr r7,SPRN_MAS0 rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */ mfspr r7,SPRN_MAS1 /* Ensure IPROT set */ oris r7,r7,MAS1_IPROT@h mtspr SPRN_MAS1,r7 tlbwe /* 2. Invalidate all entries except the entry we're executing in */ mfspr r9,SPRN_TLB1CFG andi. r9,r9,0xfff li r6,0 /* Set Entry counter to 0 */ 1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ mtspr SPRN_MAS0,r7 tlbre mfspr r7,SPRN_MAS1 rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ cmpw r3,r6 beq skpinv /* Don't update the current execution TLB */ mtspr SPRN_MAS1,r7 tlbwe isync skpinv: addi r6,r6,1 /* Increment */ cmpw r6,r9 /* Are we done? */ bne 1b /* If not, repeat */ /* Invalidate TLB0 */ li r6,0x04 tlbivax 0,r6 TLBSYNC /* Invalidate TLB1 */ li r6,0x0c tlbivax 0,r6 TLBSYNC /* 3. Setup a temp mapping and jump to it */ andi. r5, r3, 0x1 /* Find an unused, non-zero entry */ addi r5, r5, 0x1 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ mtspr SPRN_MAS0,r7 tlbre /* grab and fixup the RPN */ mfspr r6,SPRN_MAS1 /* extract MAS1[SIZE] */ rlwinm r6,r6,25,27,31 li r8,-1 addi r6,r6,10 slw r6,r8,r6 /* convert to mask */ bcl 20,31,$+4 /* Find our address */ 1: mflr r7 mfspr r8,SPRN_MAS3 #ifdef CONFIG_PHYS_64BIT mfspr r23,SPRN_MAS7 #endif and r8,r6,r8 subfic r9,r6,-4096 and r9,r9,r7 or r25,r8,r9 ori r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR) /* Just modify the entry ID and EPN for the temp mapping */ lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ mtspr SPRN_MAS0,r7 xori r6,r4,1 /* Setup TMP mapping in the other Address space */ slwi r6,r6,12 oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_4K))@l mtspr SPRN_MAS1,r6 mfspr r6,SPRN_MAS2 li r7,0 /* temp EPN = 0 */ rlwimi r7,r6,0,20,31 mtspr SPRN_MAS2,r7 mtspr SPRN_MAS3,r8 tlbwe xori r6,r4,1 slwi r6,r6,5 /* setup new context with other address space */ bcl 20,31,$+4 /* Find our address */ 1: mflr r9 rlwimi r7,r9,0,20,31 addi r7,r7,(2f - 1b) mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r6 rfi 2: /* 4. Clear out PIDs & Search info */ li r6,0 mtspr SPRN_MAS6,r6 mtspr SPRN_PID0,r6 mfspr r7,SPRN_MMUCFG rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */ cmpwi r7,3 bne 2f /* skip if NPIDS != 3 */ mtspr SPRN_PID1,r6 mtspr SPRN_PID2,r6 /* 5. Invalidate mapping we started in */ 2: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ mtspr SPRN_MAS0,r7 tlbre mfspr r6,SPRN_MAS1 rlwinm r6,r6,0,2,0 /* clear IPROT */ mtspr SPRN_MAS1,r6 tlbwe /* Invalidate TLB1 */ li r9,0x0c tlbivax 0,r9 TLBSYNC #if defined(ENTRY_MAPPING_BOOT_SETUP) /* 6.
Setup kernstart_virt_addr mapping in TLB1[0] */ lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ mtspr SPRN_MAS0,r6 lis r6,(MAS1_VALID|MAS1_IPROT)@h ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l mtspr SPRN_MAS1,r6 lis r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h ori r6,r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l and r6,r6,r20 ori r6,r6,MAS2_M_IF_NEEDED@l mtspr SPRN_MAS2,r6 mtspr SPRN_MAS3,r8 tlbwe /* 7. Jump to kernstart_virt_addr mapping */ mr r6,r20 #elif defined(ENTRY_MAPPING_KEXEC_SETUP) /* * 6. Setup a 1:1 mapping in TLB1. Esel 0 is unused, 1 or 2 contains the tmp * mapping so we start at 3. We set up 8 mappings, each 256MiB in size. This * will cover the first 2GiB of memory. */ lis r10, (MAS1_VALID|MAS1_IPROT)@h ori r10,r10, (MAS1_TSIZE(BOOK3E_PAGESZ_256M))@l li r11, 0 li r0, 8 mtctr r0 next_tlb_setup: addi r0, r11, 3 rlwinm r0, r0, 16, 4, 15 // Compute esel rlwinm r9, r11, 28, 0, 3 // Compute [ER]PN oris r0, r0, (MAS0_TLBSEL(1))@h mtspr SPRN_MAS0,r0 mtspr SPRN_MAS1,r10 mtspr SPRN_MAS2,r9 ori r9, r9, (MAS3_SX|MAS3_SW|MAS3_SR) mtspr SPRN_MAS3,r9 tlbwe addi r11, r11, 1 bdnz+ next_tlb_setup /* 7. Jump to our 1:1 mapping */ mr r6, r25 #else #error You need to specify the mapping or not use this at all. #endif lis r7,MSR_KERNEL@h ori r7,r7,MSR_KERNEL@l bcl 20,31,$+4 /* Find our address */ 1: mflr r9 rlwimi r6,r9,0,20,31 addi r6,r6,(2f - 1b) mtspr SPRN_SRR0,r6 mtspr SPRN_SRR1,r7 rfi /* start execution out of TLB1[0] entry */ /* 8. Clear out the temp mapping */ 2: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ mtspr SPRN_MAS0,r7 tlbre mfspr r8,SPRN_MAS1 rlwinm r8,r8,0,2,0 /* clear IPROT */ mtspr SPRN_MAS1,r8 tlbwe /* Invalidate TLB1 */ li r9,0x0c tlbivax 0,r9 TLBSYNC
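/*
 * Editor's note (annotation, not part of the original file): the
 * ENTRY_MAPPING_KEXEC_SETUP loop in step 6 above, as a C sketch.
 * write_tlb_entry() is a hypothetical stand-in for the four MAS writes
 * plus tlbwe; the MAS macros are the real ones:
 *
 *	for (i = 0; i < 8; i++) {		// mtctr r0 / bdnz
 *		u32 epn  = i << 28;		// i * 256MiB, 1:1 with RPN
 *		u32 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i + 3);	// esel 3..10
 *		u32 mas1 = MAS1_VALID | MAS1_IPROT |
 *			   MAS1_TSIZE(BOOK3E_PAGESZ_256M);
 *		u32 mas2 = epn;
 *		u32 mas3 = epn | MAS3_SX | MAS3_SW | MAS3_SR;
 *		write_tlb_entry(mas0, mas1, mas2, mas3);
 *	}
 */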
aixcc-public/challenge-001-exemplar-source
4,714
arch/powerpc/kernel/idle_6xx.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains the power_save function for 6xx & 7xxx CPUs * rewritten in assembler * * Warning ! This code assumes that if your machine has a 750fx * it will have PLL 1 set to low speed mode (used during NAP/DOZE). * If this is not the case some additional changes will have to * be done to check a runtime var (a bit like powersave-nap) */ #include <linux/threads.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/feature-fixups.h> .text /* * Init idle, called at early CPU setup time from head.S for each CPU * Make sure no remnant of NAP mode remains in HID0, save default * values for some CPU specific registers. Called with r24 * containing CPU number and r3 reloc offset */ _GLOBAL(init_idle_6xx) BEGIN_FTR_SECTION mfspr r4,SPRN_HID0 rlwinm r4,r4,0,10,8 /* Clear NAP */ mtspr SPRN_HID0, r4 b 1f END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) blr 1: slwi r5,r24,2 add r5,r5,r3 BEGIN_FTR_SECTION mfspr r4,SPRN_MSSCR0 addis r6,r5, nap_save_msscr0@ha stw r4,nap_save_msscr0@l(r6) END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) BEGIN_FTR_SECTION mfspr r4,SPRN_HID1 addis r6,r5,nap_save_hid1@ha stw r4,nap_save_hid1@l(r6) END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) blr /* * Here is the power_save_6xx function. This could eventually be * split into several functions & the function pointer changed * depending on the various features. */ _GLOBAL(ppc6xx_idle) /* Check if we can nap or doze, put HID0 mask in r3 */ lis r3, 0 BEGIN_FTR_SECTION lis r3,HID0_DOZE@h END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) BEGIN_FTR_SECTION /* We must dynamically check for the NAP feature as it * can be cleared by CPU init after the fixups are done */ lis r4,cur_cpu_spec@ha lwz r4,cur_cpu_spec@l(r4) lwz r4,CPU_SPEC_FEATURES(r4) andi. r0,r4,CPU_FTR_CAN_NAP beq 1f /* Now check if user or arch enabled NAP mode */ lis r4,powersave_nap@ha lwz r4,powersave_nap@l(r4) cmpwi 0,r4,0 beq 1f lis r3,HID0_NAP@h 1: END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) cmpwi 0,r3,0 beqlr /* Some pre-nap cleanups needed on some CPUs */ andis. r0,r3,HID0_NAP@h beq 2f BEGIN_FTR_SECTION /* Disable L2 prefetch on some 745x and try to ensure * L2 prefetch engines are idle. As explained by errata * text, we can't be sure they are, we just hope very hard * that will be enough (sic !). At least I noticed Apple * doesn't even bother doing the dcbf's here...
*/ mfspr r4,SPRN_MSSCR0 rlwinm r4,r4,0,0,29 sync mtspr SPRN_MSSCR0,r4 sync isync lis r4,KERNELBASE@h dcbf 0,r4 dcbf 0,r4 dcbf 0,r4 dcbf 0,r4 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) 2: BEGIN_FTR_SECTION /* Go to low speed mode on some 750FX */ lis r4,powersave_lowspeed@ha lwz r4,powersave_lowspeed@l(r4) cmpwi 0,r4,0 beq 1f mfspr r4,SPRN_HID1 oris r4,r4,0x0001 mtspr SPRN_HID1,r4 1: END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) /* Go to NAP or DOZE now */ mfspr r4,SPRN_HID0 lis r5,(HID0_NAP|HID0_SLEEP)@h BEGIN_FTR_SECTION oris r5,r5,HID0_DOZE@h END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) andc r4,r4,r5 or r4,r4,r3 BEGIN_FTR_SECTION oris r4,r4,HID0_DPM@h /* that should be done once for all */ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) mtspr SPRN_HID0,r4 BEGIN_FTR_SECTION PPC_DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) lwz r8,TI_LOCAL_FLAGS(r2) /* set napping bit */ ori r8,r8,_TLF_NAPPING /* so when we take an exception */ stw r8,TI_LOCAL_FLAGS(r2) /* it will return to our caller */ mfmsr r7 ori r7,r7,MSR_EE oris r7,r7,MSR_POW@h 1: sync mtmsr r7 isync b 1b /* * Return from NAP/DOZE mode, restore some CPU specific registers, * R11 points to the exception frame. We have to preserve r10. */ _GLOBAL(power_save_ppc32_restore) lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */ stw r9,_NIP(r11) /* make it do a blr */ #ifdef CONFIG_SMP lwz r11,TASK_CPU(r2) /* get cpu number * 4 */ slwi r11,r11,2 #else li r11,0 #endif /* Todo make sure all these are in the same page * and load r11 (@ha part + CPU offset) only once */ BEGIN_FTR_SECTION mfspr r9,SPRN_HID0 andis. r9,r9,HID0_NAP@h beq 1f addis r9, r11, nap_save_msscr0@ha lwz r9,nap_save_msscr0@l(r9) mtspr SPRN_MSSCR0, r9 sync isync 1: END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) BEGIN_FTR_SECTION addis r9, r11, nap_save_hid1@ha lwz r9,nap_save_hid1@l(r9) mtspr SPRN_HID1, r9 END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) blr _ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore) .data _GLOBAL(nap_save_msscr0) .space 4*NR_CPUS _GLOBAL(nap_save_hid1) .space 4*NR_CPUS _GLOBAL(powersave_lowspeed) .long 0
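The nap-versus-doze choice at the top of ppc6xx_idle is feature- and policy-driven. A C sketch of just that decision follows; cpu_can_doze, cpu_can_nap and the HID0 bit values are illustrative stand-ins for the feature-section tests and @h constants above.

#include <stdbool.h>
#include <stdint.h>

#define HID0_DOZE (1u << 23)       /* assumed 750-class bit positions */
#define HID0_NAP  (1u << 22)

extern bool cpu_can_doze, cpu_can_nap;
extern int powersave_nap;          /* user/arch enable, as in the asm */

static uint32_t pick_powersave_mode(void)
{
	uint32_t mask = 0;

	if (cpu_can_doze)
		mask = HID0_DOZE;
	/* NAP is rechecked dynamically: CPU init may clear the feature */
	if (cpu_can_nap && powersave_nap)
		mask = HID0_NAP;
	return mask;               /* 0 means no power saving: the asm
				      takes the beqlr and returns */
}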
aixcc-public/challenge-001-exemplar-source
4,315
arch/powerpc/kernel/rtas_entry.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ #include <asm/asm-offsets.h> #include <asm/bug.h> #include <asm/page.h> #include <asm/ppc_asm.h> /* * RTAS is called with MSR IR, DR, EE disabled, and LR in the return address. * * Note: r3 is an input parameter to rtas, so don't trash it... */ #ifdef CONFIG_PPC32 _GLOBAL(enter_rtas) stwu r1,-INT_FRAME_SIZE(r1) mflr r0 stw r0,INT_FRAME_SIZE+4(r1) LOAD_REG_ADDR(r4, rtas) lis r6,1f@ha /* physical return address for rtas */ addi r6,r6,1f@l tophys(r6,r6) lwz r8,RTASENTRY(r4) lwz r4,RTASBASE(r4) mfmsr r9 stw r9,8(r1) li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) mtlr r6 stw r1, THREAD + RTAS_SP(r2) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 rfi 1: lis r8, 1f@h ori r8, r8, 1f@l LOAD_REG_IMMEDIATE(r9,MSR_KERNEL) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 rfi /* Reactivate MMU translation */ 1: lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */ lwz r9,8(r1) /* original msr value */ addi r1,r1,INT_FRAME_SIZE li r0,0 stw r0, THREAD + RTAS_SP(r2) mtlr r8 mtmsr r9 blr /* return to caller */ _ASM_NOKPROBE_SYMBOL(enter_rtas) #else /* CONFIG_PPC32 */ #include <asm/exception-64s.h> /* * 32-bit rtas on 64-bit machines has the additional problem that RTAS may * not preserve the upper parts of registers it uses. */ _GLOBAL(enter_rtas) mflr r0 std r0,16(r1) stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */ /* Because RTAS is running in 32b mode, it clobbers the high order half * of all registers that it saves. We therefore save those registers * RTAS might touch to the stack. (r0, r3-r12 are caller saved) */ SAVE_GPR(2, r1) /* Save the TOC */ SAVE_NVGPRS(r1) /* Save the non-volatiles */ mfcr r4 std r4,_CCR(r1) mfctr r5 std r5,_CTR(r1) mfspr r6,SPRN_XER std r6,_XER(r1) mfdar r7 std r7,_DAR(r1) mfdsisr r8 std r8,_DSISR(r1) /* Temporary workaround to clear CR until RTAS can be modified to * ignore all bits. */ li r0,0 mtcr r0 mfmsr r6 /* Unfortunately, the stack pointer and the MSR are also clobbered, * so they are saved in the PACA which allows us to restore * our original state after RTAS returns. */ std r1,PACAR1(r13) std r6,PACASAVEDMSR(r13) /* Setup our real return addr */ LOAD_REG_ADDR(r4,rtas_return_loc) clrldi r4,r4,2 /* convert to realmode address */ mtlr r4 __enter_rtas: LOAD_REG_ADDR(r4, rtas) ld r5,RTASENTRY(r4) /* get the rtas->entry value */ ld r4,RTASBASE(r4) /* get the rtas->base value */ /* * RTAS runs in 32-bit big endian real mode, but leave MSR[RI] on as we * may hit NMI (SRESET or MCE) while in RTAS. RTAS should disable RI in * its critical regions (as specified in PAPR+ section 7.2.1). MSR[S] * is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if * MSR[S] is set, it will remain when entering RTAS. * If we're in HV mode, RTAS must also run in HV mode, so extract MSR_HV * from the saved MSR value and insert into the value RTAS will use. */ extrdi r0, r6, 1, 63 - MSR_HV_LG LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI) insrdi r6, r0, 1, 63 - MSR_HV_LG li r0,0 mtmsrd r0,1 /* disable RI before using SRR0/1 */ mtspr SPRN_SRR0,r5 mtspr SPRN_SRR1,r6 RFI_TO_KERNEL b . /* prevent speculative execution */ rtas_return_loc: FIXUP_ENDIAN /* Set SF before anything. */ LOAD_REG_IMMEDIATE(r6, MSR_KERNEL & ~(MSR_IR|MSR_DR)) mtmsrd r6 /* relocation is off at this point */ GET_PACA(r13) bcl 20,31,$+4 0: mflr r3 ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */ ld r1,PACAR1(r13) /* Restore our SP */ ld r4,PACASAVEDMSR(r13) /* Restore our MSR */ mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 RFI_TO_KERNEL b . 
/* prevent speculative execution */ _ASM_NOKPROBE_SYMBOL(enter_rtas) _ASM_NOKPROBE_SYMBOL(__enter_rtas) _ASM_NOKPROBE_SYMBOL(rtas_return_loc) .align 3 1: .8byte rtas_restore_regs rtas_restore_regs: /* relocation is on at this point */ REST_GPR(2, r1) /* Restore the TOC */ REST_NVGPRS(r1) /* Restore the non-volatiles */ ld r4,_CCR(r1) mtcr r4 ld r5,_CTR(r1) mtctr r5 ld r6,_XER(r1) mtspr SPRN_XER,r6 ld r7,_DAR(r1) mtdar r7 ld r8,_DSISR(r1) mtdsisr r8 addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */ ld r0,16(r1) /* get return address */ mtlr r0 blr /* return to caller */ #endif /* CONFIG_PPC32 */
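On the 64-bit path, the MSR handed to RTAS is built almost from scratch; only MSR[HV] is carried over from the saved MSR via the extrdi/insrdi pair. A hedged C sketch of that computation (bit positions per the ISA, purely illustrative):

#include <stdint.h>

#define MSR_HV_LG 60
#define MSR_ME    (1ull << 12)
#define MSR_RI    (1ull << 1)

static uint64_t rtas_entry_msr(uint64_t cur_msr)
{
	uint64_t hv  = (cur_msr >> MSR_HV_LG) & 1; /* extrdi r0,r6,1,63-MSR_HV_LG */
	uint64_t msr = MSR_ME | MSR_RI;            /* SF/LE/IR/DR all clear:
						      32-bit big endian real mode */

	msr |= hv << MSR_HV_LG;                    /* insrdi r6,r0,1,63-MSR_HV_LG */
	return msr;
}

Keeping MSR[RI] set is deliberate, as the comment above explains: an NMI may arrive while RTAS runs, and RTAS itself disables RI around its critical regions.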
aixcc-public/challenge-001-exemplar-source
3,896
arch/powerpc/kernel/swsusp_85xx.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Based on swsusp_32.S, modified for FSL BookE by * Anton Vorontsov <avorontsov@ru.mvista.com> * Copyright (c) 2009-2010 MontaVista Software, LLC. */ #include <linux/threads.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/mmu.h> /* * Structure for storing CPU registers on the save area. */ #define SL_SP 0 #define SL_PC 4 #define SL_MSR 8 #define SL_TCR 0xc #define SL_SPRG0 0x10 #define SL_SPRG1 0x14 #define SL_SPRG2 0x18 #define SL_SPRG3 0x1c #define SL_SPRG4 0x20 #define SL_SPRG5 0x24 #define SL_SPRG6 0x28 #define SL_SPRG7 0x2c #define SL_TBU 0x30 #define SL_TBL 0x34 #define SL_R2 0x38 #define SL_CR 0x3c #define SL_LR 0x40 #define SL_R12 0x44 /* r12 to r31 */ #define SL_SIZE (SL_R12 + 80) .section .data .align 5 _GLOBAL(swsusp_save_area) .space SL_SIZE .section .text .align 5 _GLOBAL(swsusp_arch_suspend) lis r11,swsusp_save_area@h ori r11,r11,swsusp_save_area@l mflr r0 stw r0,SL_LR(r11) mfcr r0 stw r0,SL_CR(r11) stw r1,SL_SP(r11) stw r2,SL_R2(r11) stmw r12,SL_R12(r11) /* Save MSR & TCR */ mfmsr r4 stw r4,SL_MSR(r11) mfspr r4,SPRN_TCR stw r4,SL_TCR(r11) /* Get a stable timebase and save it */ 1: mfspr r4,SPRN_TBRU stw r4,SL_TBU(r11) mfspr r5,SPRN_TBRL stw r5,SL_TBL(r11) mfspr r3,SPRN_TBRU cmpw r3,r4 bne 1b /* Save SPRGs */ mfspr r4,SPRN_SPRG0 stw r4,SL_SPRG0(r11) mfspr r4,SPRN_SPRG1 stw r4,SL_SPRG1(r11) mfspr r4,SPRN_SPRG2 stw r4,SL_SPRG2(r11) mfspr r4,SPRN_SPRG3 stw r4,SL_SPRG3(r11) mfspr r4,SPRN_SPRG4 stw r4,SL_SPRG4(r11) mfspr r4,SPRN_SPRG5 stw r4,SL_SPRG5(r11) mfspr r4,SPRN_SPRG6 stw r4,SL_SPRG6(r11) mfspr r4,SPRN_SPRG7 stw r4,SL_SPRG7(r11) /* Call the low level suspend stuff (we should probably have made * a stackframe... */ bl swsusp_save /* Restore LR from the save area */ lis r11,swsusp_save_area@h ori r11,r11,swsusp_save_area@l lwz r0,SL_LR(r11) mtlr r0 blr _GLOBAL(swsusp_arch_resume) sync /* Load ptr the list of pages to copy in r3 */ lis r11,(restore_pblist)@h ori r11,r11,restore_pblist@l lwz r3,0(r11) /* Copy the pages. This is a very basic implementation, to * be replaced by something more cache efficient */ 1: li r0,256 mtctr r0 lwz r5,pbe_address(r3) /* source */ lwz r6,pbe_orig_address(r3) /* destination */ 2: lwz r8,0(r5) lwz r9,4(r5) lwz r10,8(r5) lwz r11,12(r5) addi r5,r5,16 stw r8,0(r6) stw r9,4(r6) stw r10,8(r6) stw r11,12(r6) addi r6,r6,16 bdnz 2b lwz r3,pbe_next(r3) cmpwi 0,r3,0 bne 1b bl flush_dcache_L1 bl flush_instruction_cache lis r11,swsusp_save_area@h ori r11,r11,swsusp_save_area@l /* * Mappings from virtual addresses to physical addresses may be * different than they were prior to restoring hibernation state. * Invalidate the TLB so that the boot CPU is using the new * mappings. */ bl _tlbil_all lwz r4,SL_SPRG0(r11) mtspr SPRN_SPRG0,r4 lwz r4,SL_SPRG1(r11) mtspr SPRN_SPRG1,r4 lwz r4,SL_SPRG2(r11) mtspr SPRN_SPRG2,r4 lwz r4,SL_SPRG3(r11) mtspr SPRN_SPRG3,r4 lwz r4,SL_SPRG4(r11) mtspr SPRN_SPRG4,r4 lwz r4,SL_SPRG5(r11) mtspr SPRN_SPRG5,r4 lwz r4,SL_SPRG6(r11) mtspr SPRN_SPRG6,r4 lwz r4,SL_SPRG7(r11) mtspr SPRN_SPRG7,r4 /* restore the MSR */ lwz r3,SL_MSR(r11) mtmsr r3 /* Restore TB */ li r3,0 mtspr SPRN_TBWL,r3 lwz r3,SL_TBU(r11) lwz r4,SL_TBL(r11) mtspr SPRN_TBWU,r3 mtspr SPRN_TBWL,r4 /* Restore TCR and clear any pending bits in TSR. 
*/ lwz r4,SL_TCR(r11) mtspr SPRN_TCR,r4 lis r4, (TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS)@h mtspr SPRN_TSR,r4 /* Kick decrementer */ li r0,1 mtdec r0 /* Restore the callee-saved registers and return */ lwz r0,SL_CR(r11) mtcr r0 lwz r2,SL_R2(r11) lmw r12,SL_R12(r11) lwz r1,SL_SP(r11) lwz r0,SL_LR(r11) mtlr r0 li r3,0 blr
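The resume loop in swsusp_arch_resume copies each saved page back with 256 iterations of four loads and stores, i.e. 4 KiB per pbe entry. An equivalent C sketch, with struct pbe laid out as the pbe_* asm offsets imply (a simplified model, not the kernel's definition):

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096             /* 256 iterations x 16 bytes in the asm */

struct pbe {
	void		*address;      /* source: where the page was saved   */
	void		*orig_address; /* destination: where it must go back */
	struct pbe	*next;
};

static void restore_pages(struct pbe *restore_pblist)
{
	for (struct pbe *p = restore_pblist; p; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);
}

The asm version is deliberately naive (the comment calls it "a very basic implementation"); the caches are flushed and the TLB invalidated afterwards precisely because the copies land at pre-hibernation physical addresses.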
aixcc-public/challenge-001-exemplar-source
13,117
arch/powerpc/kernel/tm.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Transactional memory support routines to reclaim and recheckpoint
 * transactional process state.
 *
 * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
 */

#include <asm/asm-offsets.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/bug.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

#ifdef CONFIG_VSX
/* See fpu.S, this is borrowed from there */
#define __SAVE_32FPRS_VSRS(n,c,base)		\
BEGIN_FTR_SECTION				\
	b	2f;				\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
	SAVE_32FPRS(n,base);			\
	b	3f;				\
2:	SAVE_32VSRS(n,c,base);			\
3:
#define __REST_32FPRS_VSRS(n,c,base)		\
BEGIN_FTR_SECTION				\
	b	2f;				\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
	REST_32FPRS(n,base);			\
	b	3f;				\
2:	REST_32VSRS(n,c,base);			\
3:
#else
#define __SAVE_32FPRS_VSRS(n,c,base)	SAVE_32FPRS(n, base)
#define __REST_32FPRS_VSRS(n,c,base)	REST_32FPRS(n, base)
#endif
#define SAVE_32FPRS_VSRS(n,c,base) \
	__SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
#define REST_32FPRS_VSRS(n,c,base) \
	__REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)

/* Stack frame offsets for local variables. */
#define TM_FRAME_L0	TM_FRAME_SIZE-16
#define TM_FRAME_L1	TM_FRAME_SIZE-8

/* In order to access the TM SPRs, TM must be enabled. So, do so: */
_GLOBAL(tm_enable)
	mfmsr	r4
	li	r3, MSR_TM >> 32
	sldi	r3, r3, 32
	and.	r0, r4, r3
	bne	1f
	or	r4, r4, r3
	mtmsrd	r4
1:	blr
EXPORT_SYMBOL_GPL(tm_enable);

_GLOBAL(tm_disable)
	mfmsr	r4
	li	r3, MSR_TM >> 32
	sldi	r3, r3, 32
	andc	r4, r4, r3
	mtmsrd	r4
	blr
EXPORT_SYMBOL_GPL(tm_disable);

_GLOBAL(tm_save_sprs)
	mfspr	r0, SPRN_TFHAR
	std	r0, THREAD_TM_TFHAR(r3)
	mfspr	r0, SPRN_TEXASR
	std	r0, THREAD_TM_TEXASR(r3)
	mfspr	r0, SPRN_TFIAR
	std	r0, THREAD_TM_TFIAR(r3)
	blr

_GLOBAL(tm_restore_sprs)
	ld	r0, THREAD_TM_TFHAR(r3)
	mtspr	SPRN_TFHAR, r0
	ld	r0, THREAD_TM_TEXASR(r3)
	mtspr	SPRN_TEXASR, r0
	ld	r0, THREAD_TM_TFIAR(r3)
	mtspr	SPRN_TFIAR, r0
	blr

	/* Passed an 8-bit failure cause as first argument. */
_GLOBAL(tm_abort)
	TABORT(R3)
	blr
EXPORT_SYMBOL_GPL(tm_abort);

/*
 * void tm_reclaim(struct thread_struct *thread,
 *		   uint8_t cause)
 *
 *	- Performs a full reclaim. This destroys outstanding
 *	  transactions and updates thread.ckpt_regs, thread.ckfp_state and
 *	  thread.ckvr_state with the original checkpointed state. Note that
 *	  thread->regs is unchanged.
 *
 * Purpose is to both abort transactions of, and preserve the state of,
 * a transaction at a context switch. We preserve/restore both sets of process
 * state to restore them when the thread's scheduled again. We continue in
 * userland as though nothing happened, but when the transaction is resumed
 * they will abort back to the checkpointed state we save out here.
 *
 * Call with IRQs off, stacks get all out of sync for some periods in here!
 */
_GLOBAL(tm_reclaim)
	mfcr	r5
	mflr	r0
	stw	r5, 8(r1)
	std	r0, 16(r1)
	std	r2, STK_GOT(r1)
	stdu	r1, -TM_FRAME_SIZE(r1)

	/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */

	std	r3, STK_PARAM(R3)(r1)
	SAVE_NVGPRS(r1)

	/*
	 * Save kernel live AMR since it will be clobbered by treclaim
	 * but can be used elsewhere later in kernel space.
	 */
	mfspr	r3, SPRN_AMR
	std	r3, TM_FRAME_L1(r1)

	/* We need to set up MSR for VSX register save instructions.
*/ mfmsr r14 mr r15, r14 ori r15, r15, MSR_FP li r16, 0 ori r16, r16, MSR_EE /* IRQs hard off */ andc r15, r15, r16 oris r15, r15, MSR_VEC@h #ifdef CONFIG_VSX BEGIN_FTR_SECTION oris r15,r15, MSR_VSX@h END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif mtmsrd r15 std r14, TM_FRAME_L0(r1) /* Do sanity check on MSR to make sure we are suspended */ li r7, (MSR_TS_S)@higher srdi r6, r14, 32 and r6, r6, r7 1: tdeqi r6, 0 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 /* Stash the stack pointer away for use after reclaim */ std r1, PACAR1(r13) /* Clear MSR RI since we are about to use SCRATCH0, EE is already off */ li r5, 0 mtmsrd r5, 1 /* * BE CAREFUL HERE: * At this point we can't take an SLB miss since we have MSR_RI * off. Load only to/from the stack/paca which are in SLB bolted regions * until we turn MSR RI back on. * * The moment we treclaim, ALL of our GPRs will switch * to user register state. (FPRs, CCR etc. also!) * Use an sprg and a tm_scratch in the PACA to shuffle. */ TRECLAIM(R4) /* Cause in r4 */ /* * ******************** GPRs ******************** * Stash the checkpointed r13 in the scratch SPR and get the real paca. */ SET_SCRATCH0(r13) GET_PACA(r13) /* * Stash the checkpointed r1 away in paca->tm_scratch and get the real * stack pointer back into r1. */ std r1, PACATMSCRATCH(r13) ld r1, PACAR1(r13) std r11, GPR11(r1) /* Temporary stash */ /* * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is * clobbered by an exception once we turn on MSR_RI below. */ ld r11, PACATMSCRATCH(r13) std r11, GPR1(r1) /* * Store r13 away so we can free up the scratch SPR for the SLB fault * handler (needed once we start accessing the thread_struct). */ GET_SCRATCH0(r11) std r11, GPR13(r1) /* Reset MSR RI so we can take SLB faults again */ li r11, MSR_RI mtmsrd r11, 1 /* Store the PPR in r11 and reset to decent value */ mfspr r11, SPRN_PPR HMT_MEDIUM /* Now get some more GPRS free */ std r7, GPR7(r1) /* Temporary stash */ std r12, GPR12(r1) /* '' '' '' */ ld r12, STK_PARAM(R3)(r1) /* Param 0, thread_struct * */ std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */ addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */ /* * Make r7 look like an exception frame so that we can use the neat * GPRx(n) macros. r7 is NOT a pt_regs ptr! */ subi r7, r7, STACK_FRAME_OVERHEAD /* Sync the userland GPRs 2-12, 14-31 to thread->regs: */ SAVE_GPR(0, r7) /* user r0 */ SAVE_GPRS(2, 6, r7) /* user r2-r6 */ SAVE_GPRS(8, 10, r7) /* user r8-r10 */ ld r3, GPR1(r1) /* user r1 */ ld r4, GPR7(r1) /* user r7 */ ld r5, GPR11(r1) /* user r11 */ ld r6, GPR12(r1) /* user r12 */ ld r8, GPR13(r1) /* user r13 */ std r3, GPR1(r7) std r4, GPR7(r7) std r5, GPR11(r7) std r6, GPR12(r7) std r8, GPR13(r7) SAVE_NVGPRS(r7) /* user r14-r31 */ /* ******************** NIP ******************** */ mfspr r3, SPRN_TFHAR std r3, _NIP(r7) /* Returns to failhandler */ /* * The checkpointed NIP is ignored when rescheduling/rechkpting, * but is used in signal return to 'wind back' to the abort handler. */ /* ***************** CTR, LR, CR, XER ********** */ mfctr r3 mflr r4 mfcr r5 mfxer r6 std r3, _CTR(r7) std r4, _LINK(r7) std r5, _CCR(r7) std r6, _XER(r7) /* ******************** TAR, DSCR ********** */ mfspr r3, SPRN_TAR mfspr r4, SPRN_DSCR std r3, THREAD_TM_TAR(r12) std r4, THREAD_TM_DSCR(r12) /* ******************** AMR **************** */ mfspr r3, SPRN_AMR std r3, THREAD_TM_AMR(r12) /* * MSR and flags: We don't change CRs, and we don't need to alter MSR. 
*/ /* * ******************** FPR/VR/VSRs ************ * After reclaiming, capture the checkpointed FPRs/VRs. * * We enabled VEC/FP/VSX in the msr above, so we can execute these * instructions! */ mr r3, r12 /* Altivec (VEC/VMX/VR)*/ addi r7, r3, THREAD_CKVRSTATE SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 ckvr_state */ mfvscr v0 li r6, VRSTATE_VSCR stvx v0, r7, r6 /* VRSAVE */ mfspr r0, SPRN_VRSAVE std r0, THREAD_CKVRSAVE(r3) /* Floating Point (FP) */ addi r7, r3, THREAD_CKFPSTATE SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 ckfp_state */ mffs fr0 stfd fr0,FPSTATE_FPSCR(r7) /* * TM regs, incl TEXASR -- these live in thread_struct. Note they've * been updated by the treclaim, to explain to userland the failure * cause (aborted). */ mfspr r0, SPRN_TEXASR mfspr r3, SPRN_TFHAR mfspr r4, SPRN_TFIAR std r0, THREAD_TM_TEXASR(r12) std r3, THREAD_TM_TFHAR(r12) std r4, THREAD_TM_TFIAR(r12) /* Restore kernel live AMR */ ld r8, TM_FRAME_L1(r1) mtspr SPRN_AMR, r8 /* Restore original MSR/IRQ state & clear TM mode */ ld r14, TM_FRAME_L0(r1) /* Orig MSR */ li r15, 0 rldimi r14, r15, MSR_TS_LG, (63-MSR_TS_LG)-1 mtmsrd r14 REST_NVGPRS(r1) addi r1, r1, TM_FRAME_SIZE lwz r4, 8(r1) ld r0, 16(r1) mtcr r4 mtlr r0 ld r2, STK_GOT(r1) /* Load CPU's default DSCR */ ld r0, PACA_DSCR_DEFAULT(r13) mtspr SPRN_DSCR, r0 blr /* * void __tm_recheckpoint(struct thread_struct *thread) * - Restore the checkpointed register state saved by tm_reclaim * when we switch_to a process. * * Call with IRQs off, stacks get all out of sync for * some periods in here! */ _GLOBAL(__tm_recheckpoint) mfcr r5 mflr r0 stw r5, 8(r1) std r0, 16(r1) std r2, STK_GOT(r1) stdu r1, -TM_FRAME_SIZE(r1) /* * We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. * This is used for backing up the NVGPRs: */ SAVE_NVGPRS(r1) /* * Save kernel live AMR since it will be clobbered for trechkpt * but can be used elsewhere later in kernel space. */ mfspr r8, SPRN_AMR std r8, TM_FRAME_L0(r1) /* Load complete register state from ts_ckpt* registers */ addi r7, r3, PT_CKPT_REGS /* Thread's ckpt_regs */ /* * Make r7 look like an exception frame so that we can use the neat * GPRx(n) macros. r7 is now NOT a pt_regs ptr! */ subi r7, r7, STACK_FRAME_OVERHEAD /* We need to setup MSR for FP/VMX/VSX register save instructions. */ mfmsr r6 mr r5, r6 ori r5, r5, MSR_FP #ifdef CONFIG_ALTIVEC oris r5, r5, MSR_VEC@h #endif #ifdef CONFIG_VSX BEGIN_FTR_SECTION oris r5,r5, MSR_VSX@h END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif mtmsrd r5 #ifdef CONFIG_ALTIVEC /* * FP and VEC registers: These are recheckpointed from * thread.ckfp_state and thread.ckvr_state respectively. The * thread.fp_state[] version holds the 'live' (transactional) * and will be loaded subsequently by any FPUnavailable trap. */ addi r8, r3, THREAD_CKVRSTATE li r5, VRSTATE_VSCR lvx v0, r8, r5 mtvscr v0 REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */ ld r5, THREAD_CKVRSAVE(r3) mtspr SPRN_VRSAVE, r5 #endif addi r8, r3, THREAD_CKFPSTATE lfd fr0, FPSTATE_FPSCR(r8) MTFSF_L(fr0) REST_32FPRS_VSRS(0, R4, R8) mtmsr r6 /* FP/Vec off again! 
*/ restore_gprs: /* ****************** CTR, LR, XER ************* */ ld r4, _CTR(r7) ld r5, _LINK(r7) ld r8, _XER(r7) mtctr r4 mtlr r5 mtxer r8 /* ******************** TAR ******************** */ ld r4, THREAD_TM_TAR(r3) mtspr SPRN_TAR, r4 /* ******************** AMR ******************** */ ld r4, THREAD_TM_AMR(r3) mtspr SPRN_AMR, r4 /* Load up the PPR and DSCR in GPRs only at this stage */ ld r5, THREAD_TM_DSCR(r3) ld r6, THREAD_TM_PPR(r3) REST_GPR(0, r7) /* GPR0 */ REST_GPRS(2, 4, r7) /* GPR2-4 */ REST_GPRS(8, 12, r7) /* GPR8-12 */ REST_GPRS(14, 31, r7) /* GPR14-31 */ /* Load up PPR and DSCR here so we don't run with user values for long */ mtspr SPRN_DSCR, r5 mtspr SPRN_PPR, r6 /* * Do final sanity check on TEXASR to make sure FS is set. Do this * here before we load up the userspace r1 so any bugs we hit will get * a call chain. */ mfspr r5, SPRN_TEXASR srdi r5, r5, 16 li r6, (TEXASR_FS)@h and r6, r6, r5 1: tdeqi r6, 0 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 /* * Do final sanity check on MSR to make sure we are not transactional * or suspended. */ mfmsr r6 li r5, (MSR_TS_MASK)@higher srdi r6, r6, 32 and r6, r6, r5 1: tdnei r6, 0 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 /* Restore CR */ ld r6, _CCR(r7) mtcr r6 REST_GPR(6, r7) /* * Store user r1 and r5 and r13 on the stack (in the unused save * areas / compiler reserved areas), so that we can access them after * we clear MSR RI. */ REST_GPR(5, r7) std r5, -8(r1) ld r5, GPR13(r7) std r5, -16(r1) ld r5, GPR1(r7) std r5, -24(r1) REST_GPR(7, r7) /* Stash the stack pointer away for use after recheckpoint */ std r1, PACAR1(r13) /* Clear MSR RI since we are about to clobber r13. EE is already off */ li r5, 0 mtmsrd r5, 1 /* * BE CAREFUL HERE: * At this point we can't take an SLB miss since we have MSR_RI * off. Load only to/from the stack/paca which are in SLB bolted regions * until we turn MSR RI back on. */ ld r5, -8(r1) ld r13, -16(r1) ld r1, -24(r1) /* Commit register state as checkpointed state: */ TRECHKPT HMT_MEDIUM /* * Our transactional state has now changed. * * Now just get out of here. Transactional (current) state will be * updated once restore is called on the return path in the _switch-ed * -to process. */ GET_PACA(r13) ld r1, PACAR1(r13) /* R13, R1 is restored, so we are recoverable again. EE is still off */ li r4, MSR_RI mtmsrd r4, 1 /* Restore kernel live AMR */ ld r8, TM_FRAME_L0(r1) mtspr SPRN_AMR, r8 REST_NVGPRS(r1) addi r1, r1, TM_FRAME_SIZE lwz r4, 8(r1) ld r0, 16(r1) mtcr r4 mtlr r0 ld r2, STK_GOT(r1) /* Load CPU's default DSCR */ ld r0, PACA_DSCR_DEFAULT(r13) mtspr SPRN_DSCR, r0 blr /* ****************************************************************** */
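Both routines guard themselves with trap-based sanity checks: tm_reclaim requires MSR[TS] to indicate suspended on entry, and __tm_recheckpoint requires TEXASR[FS] set and MSR[TS] clear before returning. A C sketch of those checks, with bit positions taken from the ISA and the assert() calls standing in for the tdeqi/tdnei + EMIT_BUG_ENTRY pairs above (illustrative only):

#include <assert.h>
#include <stdint.h>

#define MSR_TS_S    (1ull << 33)           /* suspended */
#define MSR_TS_T    (1ull << 34)           /* transactional */
#define MSR_TS_MASK (MSR_TS_S | MSR_TS_T)
#define TEXASR_FS   (1ull << 27)           /* failure summary, bit 63-36 */

static void check_reclaim_entry(uint64_t msr)
{
	assert((msr & MSR_TS_MASK) == MSR_TS_S);  /* must be suspended */
}

static void check_recheckpoint_exit(uint64_t texasr, uint64_t msr)
{
	assert(texasr & TEXASR_FS);               /* treclaim recorded a cause */
	assert((msr & MSR_TS_MASK) == 0);         /* neither T nor S anymore  */
}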
aixcc-public/challenge-001-exemplar-source
3,715
arch/powerpc/kernel/fpu.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * FPU support code, moved here from head.S so that it can be used * by chips which use other head-whatever.S files. * * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> * Copyright (C) 1996 Paul Mackerras. * Copyright (C) 1997 Dan Malek (dmalek@jlc.net). */ #include <asm/reg.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/cputable.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/export.h> #include <asm/asm-compat.h> #include <asm/feature-fixups.h> #ifdef CONFIG_VSX #define __REST_32FPVSRS(n,c,base) \ BEGIN_FTR_SECTION \ b 2f; \ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ REST_32FPRS(n,base); \ b 3f; \ 2: REST_32VSRS(n,c,base); \ 3: #define __SAVE_32FPVSRS(n,c,base) \ BEGIN_FTR_SECTION \ b 2f; \ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ SAVE_32FPRS(n,base); \ b 3f; \ 2: SAVE_32VSRS(n,c,base); \ 3: #else #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base) #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base) #endif #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base) #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base) /* * Load state from memory into FP registers including FPSCR. * Assumes the caller has enabled FP in the MSR. */ _GLOBAL(load_fp_state) lfd fr0,FPSTATE_FPSCR(r3) MTFSF_L(fr0) REST_32FPVSRS(0, R4, R3) blr EXPORT_SYMBOL(load_fp_state) _ASM_NOKPROBE_SYMBOL(load_fp_state); /* used by restore_math */ /* * Store FP state into memory, including FPSCR * Assumes the caller has enabled FP in the MSR. */ _GLOBAL(store_fp_state) SAVE_32FPVSRS(0, R4, R3) mffs fr0 stfd fr0,FPSTATE_FPSCR(r3) blr EXPORT_SYMBOL(store_fp_state) /* * This task wants to use the FPU now. * On UP, disable FP for the task which had the FPU previously, * and save its floating-point registers in its thread_struct. * Load up this task's FP registers from its thread_struct, * enable the FPU for the current task and return to the task. * Note that on 32-bit this can only use registers that will be * restored by fast_exception_return, i.e. r3 - r6, r10 and r11. */ _GLOBAL(load_up_fpu) mfmsr r5 #ifdef CONFIG_PPC_BOOK3S_64 /* interrupt doesn't set MSR[RI] and HPT can fault on current access */ ori r5,r5,MSR_FP|MSR_RI #else ori r5,r5,MSR_FP #endif #ifdef CONFIG_VSX BEGIN_FTR_SECTION oris r5,r5,MSR_VSX@h END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif MTMSRD(r5) /* enable use of fpu now */ isync /* enable use of FP after return */ #ifdef CONFIG_PPC32 addi r5,r2,THREAD lwz r4,THREAD_FPEXC_MODE(r5) ori r9,r9,MSR_FP /* enable FP for current */ or r9,r9,r4 #else ld r4,PACACURRENT(r13) addi r5,r4,THREAD /* Get THREAD */ lwz r4,THREAD_FPEXC_MODE(r5) ori r12,r12,MSR_FP or r12,r12,r4 std r12,_MSR(r1) #ifdef CONFIG_PPC_BOOK3S_64 li r4,0 stb r4,PACASRR_VALID(r13) #endif #endif li r4,1 stb r4,THREAD_LOAD_FP(r5) addi r10,r5,THREAD_FPSTATE lfd fr0,FPSTATE_FPSCR(r10) MTFSF_L(fr0) REST_32FPVSRS(0, R4, R10) /* restore registers and return */ /* we haven't used ctr or xer or lr */ blr _ASM_NOKPROBE_SYMBOL(load_up_fpu) /* * save_fpu(tsk) * Save the floating-point registers in its thread_struct. * Enables the FPU for use in the kernel on return. */ _GLOBAL(save_fpu) addi r3,r3,THREAD /* want THREAD of task */ PPC_LL r6,THREAD_FPSAVEAREA(r3) PPC_LL r5,PT_REGS(r3) PPC_LCMPI 0,r6,0 bne 2f addi r6,r3,THREAD_FPSTATE 2: SAVE_32FPVSRS(0, R4, R6) mffs fr0 stfd fr0,FPSTATE_FPSCR(r6) blr
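save_fpu picks its destination buffer before storing: a caller-supplied save area if THREAD_FPSAVEAREA is non-NULL, otherwise the thread's own fp_state. A simplified C sketch of that selection, using stand-in types rather than the kernel's real thread_struct layout:

struct fp_state { double fpr[32]; unsigned long fpscr; };

struct thread_struct_sketch {
	struct fp_state  fp_state;
	struct fp_state *fp_save_area;   /* THREAD_FPSAVEAREA in the asm */
};

static struct fp_state *fp_dest(struct thread_struct_sketch *t)
{
	return t->fp_save_area ? t->fp_save_area : &t->fp_state;
}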
aixcc-public/challenge-001-exemplar-source
1,316
arch/powerpc/kernel/ppc_save_regs.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 1996 Paul Mackerras. * * NOTE: assert(sizeof(buf) > 23 * sizeof(long)) */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/asm-compat.h> /* * Grab the register values as they are now. * This won't do a particularly good job because we really * want our caller's caller's registers, and our caller has * already executed its prologue. * ToDo: We could reach back into the caller's save area to do * a better job of representing the caller's state (note that * that will be different for 32-bit and 64-bit, because of the * different ABIs, though). */ _GLOBAL(ppc_save_regs) /* This allows stack frame accessor macros and offsets to be used */ subi r3,r3,STACK_FRAME_OVERHEAD PPC_STL r0,GPR0(r3) #ifdef CONFIG_PPC32 stmw r2,GPR2(r3) #else SAVE_GPRS(2, 31, r3) lbz r0,PACAIRQSOFTMASK(r13) PPC_STL r0,SOFTE(r3) #endif /* store current SP */ PPC_STL r1,GPR1(r3) /* get caller's LR */ PPC_LL r4,0(r1) PPC_LL r0,LRSAVE(r4) PPC_STL r0,_LINK(r3) mflr r0 PPC_STL r0,_NIP(r3) mfmsr r0 PPC_STL r0,_MSR(r3) mfctr r0 PPC_STL r0,_CTR(r3) mfxer r0 PPC_STL r0,_XER(r3) mfcr r0 PPC_STL r0,_CCR(r3) li r0,0 PPC_STL r0,_TRAP(r3) PPC_STL r0,ORIG_GPR3(r3) blr
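A hypothetical caller, to illustrate the contract in the comment above: the buffer is a struct pt_regs (at least 23 longs of space), and because the function has to run its own prologue, the captured NIP and stack pointer describe the immediate caller, not the caller's caller.

#include <asm/ptrace.h>          /* kernel build context assumed */

void ppc_save_regs(struct pt_regs *regs);

static void snapshot_here(void)
{
	static struct pt_regs regs;

	ppc_save_regs(&regs);
	/* regs.nip is now roughly this call site; regs.gpr[1] our stack */
}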
aixcc-public/challenge-001-exemplar-source
2,336
arch/powerpc/kernel/misc.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains miscellaneous low-level functions. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) * and Paul Mackerras. * * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) * * setjmp/longjmp code by Paul Mackerras. */ #include <asm/ppc_asm.h> #include <asm/unistd.h> #include <asm/asm-compat.h> #include <asm/asm-offsets.h> #include <asm/export.h> .text /* * Returns (address we are running at) - (address we were linked at) * for use before the text and data are mapped to KERNELBASE. * add_reloc_offset(x) returns x + reloc_offset(). */ _GLOBAL(reloc_offset) li r3, 0 _GLOBAL(add_reloc_offset) mflr r0 bcl 20,31,$+4 1: mflr r5 PPC_LL r4,(2f-1b)(r5) subf r5,r4,r5 add r3,r3,r5 mtlr r0 blr _ASM_NOKPROBE_SYMBOL(reloc_offset) _ASM_NOKPROBE_SYMBOL(add_reloc_offset) .align 3 2: PPC_LONG 1b _GLOBAL(setjmp) mflr r0 PPC_STL r0,0(r3) PPC_STL r1,SZL(r3) PPC_STL r2,2*SZL(r3) #ifdef CONFIG_PPC32 mfcr r12 stmw r12, 3*SZL(r3) #else mfcr r0 PPC_STL r0,3*SZL(r3) PPC_STL r13,4*SZL(r3) PPC_STL r14,5*SZL(r3) PPC_STL r15,6*SZL(r3) PPC_STL r16,7*SZL(r3) PPC_STL r17,8*SZL(r3) PPC_STL r18,9*SZL(r3) PPC_STL r19,10*SZL(r3) PPC_STL r20,11*SZL(r3) PPC_STL r21,12*SZL(r3) PPC_STL r22,13*SZL(r3) PPC_STL r23,14*SZL(r3) PPC_STL r24,15*SZL(r3) PPC_STL r25,16*SZL(r3) PPC_STL r26,17*SZL(r3) PPC_STL r27,18*SZL(r3) PPC_STL r28,19*SZL(r3) PPC_STL r29,20*SZL(r3) PPC_STL r30,21*SZL(r3) PPC_STL r31,22*SZL(r3) #endif li r3,0 blr _GLOBAL(longjmp) #ifdef CONFIG_PPC32 lmw r12, 3*SZL(r3) mtcrf 0x38, r12 #else PPC_LL r13,4*SZL(r3) PPC_LL r14,5*SZL(r3) PPC_LL r15,6*SZL(r3) PPC_LL r16,7*SZL(r3) PPC_LL r17,8*SZL(r3) PPC_LL r18,9*SZL(r3) PPC_LL r19,10*SZL(r3) PPC_LL r20,11*SZL(r3) PPC_LL r21,12*SZL(r3) PPC_LL r22,13*SZL(r3) PPC_LL r23,14*SZL(r3) PPC_LL r24,15*SZL(r3) PPC_LL r25,16*SZL(r3) PPC_LL r26,17*SZL(r3) PPC_LL r27,18*SZL(r3) PPC_LL r28,19*SZL(r3) PPC_LL r29,20*SZL(r3) PPC_LL r30,21*SZL(r3) PPC_LL r31,22*SZL(r3) PPC_LL r0,3*SZL(r3) mtcrf 0x38,r0 #endif PPC_LL r0,0(r3) PPC_LL r1,SZL(r3) PPC_LL r2,2*SZL(r3) mtlr r0 mr. r3, r4 bnelr li r3, 1 blr _GLOBAL(current_stack_frame) PPC_LL r3,0(r1) blr EXPORT_SYMBOL(current_stack_frame)
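The tail of longjmp above (mr. r3,r4 / bnelr / li r3,1) forces a zero value to read back as 1, matching C longjmp semantics. A usage sketch against the 23-slot buffer the asm saves (lr, r1, r2, cr, then r13-r31 on 64-bit); the declarations are illustrative, mirroring the kernel's own prototypes rather than libc:

long setjmp(long *buf);
void longjmp(long *buf, long val);

static long jb[23];              /* lr, r1, r2, cr, r13..r31 */

static int example(void)
{
	long rc = setjmp(jb);    /* returns 0 on the direct call */

	if (rc == 0) {
		/* first pass: do something that may bail out */
		longjmp(jb, 0);  /* comes back here with rc == 1, not 0 */
	}
	return (int)rc;
}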
aixcc-public/challenge-001-exemplar-source
11,096
arch/powerpc/kernel/cpu_setup_6xx.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low level CPU setup functions.
 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/feature-fixups.h>

_GLOBAL(__setup_cpu_603)
	mflr	r5
BEGIN_MMU_FTR_SECTION
	li	r10,0
	mtspr	SPRN_SPRG_603_LRU,r10	/* init SW LRU tracking */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
BEGIN_FTR_SECTION
	bl	__init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
	bl	setup_common_caches
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_604)
	mflr	r5
	bl	setup_common_caches
	bl	setup_604_hid0
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_750)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_750cx)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750cx
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_750fx)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750fx
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_7400)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_7400_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_7410)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_7410_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	li	r3,0
	mtspr	SPRN_L2CR2,r3
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_745x)
	mflr	r5
	bl	setup_common_caches
	bl	setup_745x_specifics
	mtlr	r5
	blr

/* Enable caches for 603's, 604, 750 & 7400 */
setup_common_caches:
	mfspr	r11,SPRN_HID0
	andi.	r0,r11,HID0_DCE
	ori	r11,r11,HID0_ICE|HID0_DCE
	ori	r8,r11,HID0_ICFI
	bne	1f			/* don't invalidate the D-cache */
	ori	r8,r8,HID0_DCI		/* unless it wasn't enabled */
1:	sync
	mtspr	SPRN_HID0,r8		/* enable and invalidate caches */
	sync
	mtspr	SPRN_HID0,r11		/* enable caches */
	sync
	isync
	blr

/* 604, 604e, 604ev, ...
 * Enable superscalar execution & branch history table
 */
setup_604_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SIED|HID0_BHTE
	ori	r8,r11,HID0_BTCD
	sync
	mtspr	SPRN_HID0,r8	/* flush branch target address cache */
	sync			/* on 604e/604r */
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr

/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
 * errata we work around here.
 * Moto MPC710CE.pdf describes them, those are errata
 * #3, #4 and #5
 * Note that we assume the firmware didn't choose to
 * apply other workarounds (there are other ones documented
 * in the .pdf). It appears that Apple firmware only works
 * around #3 and with the same fix we use. We may want to
 * check if the CPU is using 60x bus mode in which case
 * the workaround for errata #4 is useless. Also, we may
 * want to explicitly clear HID0_NOPDST as this is not
 * needed once we have applied workaround #5 (though it's
 * not set by Apple's firmware at least).
 */
setup_7400_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31
	cmpwi	0,r3,0x0207
	ble	1f
	blr
setup_7410_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31
	cmpwi	0,r3,0x0100
	bnelr
1:
	mfspr	r11,SPRN_MSSSR0
	/* Errata #3: Set L1OPQ_SIZE to 0x10 */
	rlwinm	r11,r11,0,9,6
	oris	r11,r11,0x0100
	/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?)
*/ oris r11,r11,0x0002 /* Errata #5: Set DRLT_SIZE to 0x01 */ rlwinm r11,r11,0,5,2 oris r11,r11,0x0800 sync mtspr SPRN_MSSSR0,r11 sync isync blr /* 740/750/7400/7410 * Enable Store Gathering (SGE), Address Broadcast (ABE), * Branch History Table (BHTE), Branch Target ICache (BTIC) * Dynamic Power Management (DPM), Speculative (SPD) * Clear Instruction cache throttling (ICTC) */ setup_750_7400_hid0: mfspr r11,SPRN_HID0 ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC oris r11,r11,HID0_DPM@h BEGIN_FTR_SECTION xori r11,r11,HID0_BTIC END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) BEGIN_FTR_SECTION xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) li r3,HID0_SPD andc r11,r11,r3 /* clear SPD: enable speculative */ li r3,0 mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */ isync mtspr SPRN_HID0,r11 sync isync blr /* 750cx specific * Looks like we have to disable NAP feature for some PLL settings... * (waiting for confirmation) */ setup_750cx: mfspr r10, SPRN_HID1 rlwinm r10,r10,4,28,31 cmpwi cr0,r10,7 cmpwi cr1,r10,9 cmpwi cr2,r10,11 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq cror 4*cr0+eq,4*cr0+eq,4*cr2+eq bnelr lwz r6,CPU_SPEC_FEATURES(r4) li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 stw r6,CPU_SPEC_FEATURES(r4) blr /* 750fx specific */ setup_750fx: blr /* MPC 745x * Enable Store Gathering (SGE), Branch Folding (FOLD) * Branch History Table (BHTE), Branch Target ICache (BTIC) * Dynamic Power Management (DPM), Speculative (SPD) * Ensure our data cache instructions really operate. * Timebase has to be running or we wouldn't have made it here, * just ensure we don't disable it. * Clear Instruction cache throttling (ICTC) * Enable L2 HW prefetch */ setup_745x_specifics: /* We check for the presence of an L3 cache setup by * the firmware. If any, we disable NAP capability as * it's known to be bogus on rev 2.1 and earlier */ BEGIN_FTR_SECTION mfspr r11,SPRN_L3CR andis. r11,r11,L3CR_L3E@h beq 1f END_FTR_SECTION_IFSET(CPU_FTR_L3CR) lwz r6,CPU_SPEC_FEATURES(r4) andis. r0,r6,CPU_FTR_L3_DISABLE_NAP@h beq 1f li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 stw r6,CPU_SPEC_FEATURES(r4) 1: mfspr r11,SPRN_HID0 /* All of the bits we have to set..... */ ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE ori r11,r11,HID0_LRSTK | HID0_BTIC oris r11,r11,HID0_DPM@h BEGIN_MMU_FTR_SECTION oris r11,r11,HID0_HIGH_BAT@h END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) BEGIN_FTR_SECTION xori r11,r11,HID0_BTIC END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) BEGIN_FTR_SECTION xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) /* All of the bits we have to clear.... */ li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI andc r11,r11,r3 /* clear SPD: enable speculative */ li r3,0 mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */ isync mtspr SPRN_HID0,r11 sync isync /* Enable L2 HW prefetch, if L2 is enabled */ mfspr r3,SPRN_L2CR andis. r3,r3,L2CR_L2E@h beqlr mfspr r3,SPRN_MSSCR0 ori r3,r3,3 sync mtspr SPRN_MSSCR0,r3 sync isync blr /* * Initialize the FPU registers. 
This is needed to work around an errata * in some 750 cpus where using a not yet initialized FPU register after * power on reset may hang the CPU */ _GLOBAL(__init_fpu_registers) mfmsr r10 ori r11,r10,MSR_FP mtmsr r11 isync addis r9,r3,empty_zero_page@ha addi r9,r9,empty_zero_page@l REST_32FPRS(0,r9) sync mtmsr r10 isync blr _ASM_NOKPROBE_SYMBOL(__init_fpu_registers) /* Definitions for the table use to save CPU states */ #define CS_HID0 0 #define CS_HID1 4 #define CS_HID2 8 #define CS_MSSCR0 12 #define CS_MSSSR0 16 #define CS_ICTRL 20 #define CS_LDSTCR 24 #define CS_LDSTDB 28 #define CS_SIZE 32 .data .balign L1_CACHE_BYTES cpu_state_storage: .space CS_SIZE .balign L1_CACHE_BYTES,0 .text /* Called in normal context to backup CPU 0 state. This * does not include cache settings. This function is also * called for machine sleep. This does not include the MMU * setup, BATs, etc... but rather the "special" registers * like HID0, HID1, MSSCR0, etc... */ _GLOBAL(__save_cpu_setup) /* Some CR fields are volatile, we back it up all */ mfcr r7 /* Get storage ptr */ lis r5,cpu_state_storage@h ori r5,r5,cpu_state_storage@l /* Save HID0 (common to all CONFIG_PPC_BOOK3S_32 cpus) */ mfspr r3,SPRN_HID0 stw r3,CS_HID0(r5) /* Now deal with CPU type dependent registers */ mfspr r3,SPRN_PVR srwi r3,r3,16 cmplwi cr0,r3,0x8000 /* 7450 */ cmplwi cr1,r3,0x000c /* 7400 */ cmplwi cr2,r3,0x800c /* 7410 */ cmplwi cr3,r3,0x8001 /* 7455 */ cmplwi cr4,r3,0x8002 /* 7457 */ cmplwi cr5,r3,0x8003 /* 7447A */ cmplwi cr6,r3,0x7000 /* 750FX */ cmplwi cr7,r3,0x8004 /* 7448 */ /* cr1 is 7400 || 7410 */ cror 4*cr1+eq,4*cr1+eq,4*cr2+eq /* cr0 is 74xx */ cror 4*cr0+eq,4*cr0+eq,4*cr3+eq cror 4*cr0+eq,4*cr0+eq,4*cr4+eq cror 4*cr0+eq,4*cr0+eq,4*cr1+eq cror 4*cr0+eq,4*cr0+eq,4*cr5+eq cror 4*cr0+eq,4*cr0+eq,4*cr7+eq bne 1f /* Backup 74xx specific regs */ mfspr r4,SPRN_MSSCR0 stw r4,CS_MSSCR0(r5) mfspr r4,SPRN_MSSSR0 stw r4,CS_MSSSR0(r5) beq cr1,1f /* Backup 745x specific registers */ mfspr r4,SPRN_HID1 stw r4,CS_HID1(r5) mfspr r4,SPRN_ICTRL stw r4,CS_ICTRL(r5) mfspr r4,SPRN_LDSTCR stw r4,CS_LDSTCR(r5) mfspr r4,SPRN_LDSTDB stw r4,CS_LDSTDB(r5) 1: bne cr6,1f /* Backup 750FX specific registers */ mfspr r4,SPRN_HID1 stw r4,CS_HID1(r5) /* If rev 2.x, backup HID2 */ mfspr r3,SPRN_PVR andi. r3,r3,0xff00 cmpwi cr0,r3,0x0200 bne 1f mfspr r4,SPRN_HID2 stw r4,CS_HID2(r5) 1: mtcr r7 blr /* Called with no MMU context (typically MSR:IR/DR off) to * restore CPU state as backed up by the previous * function. 
This does not include cache setting */ _GLOBAL(__restore_cpu_setup) /* Some CR fields are volatile, we back it up all */ mfcr r7 /* Get storage ptr */ lis r5,(cpu_state_storage-KERNELBASE)@h ori r5,r5,cpu_state_storage@l /* Restore HID0 */ lwz r3,CS_HID0(r5) sync isync mtspr SPRN_HID0,r3 sync isync /* Now deal with CPU type dependent registers */ mfspr r3,SPRN_PVR srwi r3,r3,16 cmplwi cr0,r3,0x8000 /* 7450 */ cmplwi cr1,r3,0x000c /* 7400 */ cmplwi cr2,r3,0x800c /* 7410 */ cmplwi cr3,r3,0x8001 /* 7455 */ cmplwi cr4,r3,0x8002 /* 7457 */ cmplwi cr5,r3,0x8003 /* 7447A */ cmplwi cr6,r3,0x7000 /* 750FX */ cmplwi cr7,r3,0x8004 /* 7448 */ /* cr1 is 7400 || 7410 */ cror 4*cr1+eq,4*cr1+eq,4*cr2+eq /* cr0 is 74xx */ cror 4*cr0+eq,4*cr0+eq,4*cr3+eq cror 4*cr0+eq,4*cr0+eq,4*cr4+eq cror 4*cr0+eq,4*cr0+eq,4*cr1+eq cror 4*cr0+eq,4*cr0+eq,4*cr5+eq cror 4*cr0+eq,4*cr0+eq,4*cr7+eq bne 2f /* Restore 74xx specific regs */ lwz r4,CS_MSSCR0(r5) sync mtspr SPRN_MSSCR0,r4 sync isync lwz r4,CS_MSSSR0(r5) sync mtspr SPRN_MSSSR0,r4 sync isync bne cr2,1f /* Clear 7410 L2CR2 */ li r4,0 mtspr SPRN_L2CR2,r4 1: beq cr1,2f /* Restore 745x specific registers */ lwz r4,CS_HID1(r5) sync mtspr SPRN_HID1,r4 isync sync lwz r4,CS_ICTRL(r5) sync mtspr SPRN_ICTRL,r4 isync sync lwz r4,CS_LDSTCR(r5) sync mtspr SPRN_LDSTCR,r4 isync sync lwz r4,CS_LDSTDB(r5) sync mtspr SPRN_LDSTDB,r4 isync sync 2: bne cr6,1f /* Restore 750FX specific registers * that is restore HID2 on rev 2.x and PLL config & switch * to PLL 0 on all */ /* If rev 2.x, restore HID2 with low voltage bit cleared */ mfspr r3,SPRN_PVR andi. r3,r3,0xff00 cmpwi cr0,r3,0x0200 bne 4f lwz r4,CS_HID2(r5) rlwinm r4,r4,0,19,17 mtspr SPRN_HID2,r4 sync 4: lwz r4,CS_HID1(r5) rlwinm r5,r4,0,16,14 mtspr SPRN_HID1,r5 /* Wait for PLL to stabilize */ mftbl r5 3: mftbl r6 sub r6,r6,r5 cmplwi cr0,r6,10000 ble 3b /* Setup final PLL */ mtspr SPRN_HID1,r4 1: mtcr r7 blr _ASM_NOKPROBE_SYMBOL(__restore_cpu_setup)
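Both __save_cpu_setup and __restore_cpu_setup decode PVR>>16 with a chain of cmplwi/cror comparisons to classify the CPU before touching the optional SPRs. The equivalent predicate logic in C, with the PVR values copied from the comparisons above (purely illustrative):

#include <stdbool.h>
#include <stdint.h>

static bool is_7400_class(uint32_t pvr_hi)   /* cr1: 7400 || 7410 */
{
	return pvr_hi == 0x000c || pvr_hi == 0x800c;
}

static bool is_74xx(uint32_t pvr_hi)         /* cr0: 7450/55/57/47A/48 too */
{
	return pvr_hi == 0x8000 || pvr_hi == 0x8001 || pvr_hi == 0x8002 ||
	       pvr_hi == 0x8003 || pvr_hi == 0x8004 || is_7400_class(pvr_hi);
}

static bool is_750fx(uint32_t pvr_hi)        /* cr6 */
{
	return pvr_hi == 0x7000;
}

74xx parts get MSSCR0/MSSSR0 handled, 745x additionally HID1/ICTRL/LDSTCR/LDSTDB, and 750FX rev 2.x additionally HID2, exactly as the branches on cr0/cr1/cr6 select.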
aixcc-public/challenge-001-exemplar-source
1,274
arch/powerpc/kernel/note.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * PowerPC ELF notes. * * Copyright 2019, IBM Corporation */ #include <linux/elfnote.h> #include <asm/elfnote.h> /* * Ultravisor-capable bit (PowerNV only). * * Bit 0 indicates that the powerpc kernel binary knows how to run in an * ultravisor-enabled system. * * In an ultravisor-enabled system, some machine resources are now controlled * by the ultravisor. If the kernel is not ultravisor-capable, but it ends up * being run on a machine with ultravisor, the kernel will probably crash * trying to access ultravisor resources. For instance, it may crash in early * boot trying to set the partition table entry 0. * * In an ultravisor-enabled system, a bootloader could warn the user or prevent * the kernel from being run if the PowerPC ultravisor capability doesn't exist * or the Ultravisor-capable bit is not set. */ #ifdef CONFIG_PPC_POWERNV #define PPCCAP_ULTRAVISOR_BIT (1 << 0) #else #define PPCCAP_ULTRAVISOR_BIT 0 #endif /* * Add the PowerPC Capabilities in the binary ELF note. It is a bitmap that * can be used to advertise kernel capabilities to userland. */ #define PPC_CAPABILITIES_BITMAP (PPCCAP_ULTRAVISOR_BIT) ELFNOTE(PowerPC, PPC_ELFNOTE_CAPABILITIES, .long PPC_CAPABILITIES_BITMAP)
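A consumer-side sketch: how a bootloader that has parsed this ELF note might test the capabilities bitmap. The bit value mirrors the definition above; the helper itself is hypothetical.

#include <stdbool.h>
#include <stdint.h>

#define PPCCAP_ULTRAVISOR_BIT (1u << 0)

static bool kernel_is_ultravisor_capable(uint32_t caps_bitmap)
{
	return caps_bitmap & PPCCAP_ULTRAVISOR_BIT;
}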
aixcc-public/challenge-001-exemplar-source
90,239
arch/powerpc/kernel/exceptions-64s.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This file contains the 64-bit "server" PowerPC variant * of the low level exception handling including exception * vectors, exception return, part of the slb and stab * handling and other fixed offset specific things. * * This file is meant to be #included from head_64.S due to * position dependent assembly. * * Most of this originates from head_64.S and thus has the same * copyright history. * */ #include <asm/hw_irq.h> #include <asm/exception-64s.h> #include <asm/ptrace.h> #include <asm/cpuidle.h> #include <asm/head-64.h> #include <asm/feature-fixups.h> #include <asm/kup.h> /* * Following are fixed section helper macros. * * EXC_REAL_BEGIN/END - real, unrelocated exception vectors * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these) * TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use) * EXC_COMMON - After switching to virtual, relocated mode. */ #define EXC_REAL_BEGIN(name, start, size) \ FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size) #define EXC_REAL_END(name, start, size) \ FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size) #define EXC_VIRT_BEGIN(name, start, size) \ FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) #define EXC_VIRT_END(name, start, size) \ FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size) #define EXC_COMMON_BEGIN(name) \ USE_TEXT_SECTION(); \ .balign IFETCH_ALIGN_BYTES; \ .global name; \ _ASM_NOKPROBE_SYMBOL(name); \ DEFINE_FIXED_SYMBOL(name, text); \ name: #define TRAMP_REAL_BEGIN(name) \ FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name) #define TRAMP_VIRT_BEGIN(name) \ FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name) #define EXC_REAL_NONE(start, size) \ FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \ FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size) #define EXC_VIRT_NONE(start, size) \ FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \ FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size) /* * We're short on space and time in the exception prolog, so we can't * use the normal LOAD_REG_IMMEDIATE macro to load the address of label. * Instead we get the base of the kernel from paca->kernelbase and or in the low * part of label. This requires that the label be within 64KB of kernelbase, and * that kernelbase be 64K aligned. */ #define LOAD_HANDLER(reg, label) \ ld reg,PACAKBASE(r13); /* get high part of &label */ \ ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label) #define __LOAD_HANDLER(reg, label, section) \ ld reg,PACAKBASE(r13); \ ori reg,reg,(ABS_ADDR(label, section))@l /* * Branches from unrelocated code (e.g., interrupts) to labels outside * head-y require >64K offsets. 
*/ #define __LOAD_FAR_HANDLER(reg, label, section) \ ld reg,PACAKBASE(r13); \ ori reg,reg,(ABS_ADDR(label, section))@l; \ addis reg,reg,(ABS_ADDR(label, section))@h /* * Interrupt code generation macros */ #define IVEC .L_IVEC_\name\() /* Interrupt vector address */ #define IHSRR .L_IHSRR_\name\() /* Sets SRR or HSRR registers */ #define IHSRR_IF_HVMODE .L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */ #define IAREA .L_IAREA_\name\() /* PACA save area */ #define IVIRT .L_IVIRT_\name\() /* Has virt mode entry point */ #define IISIDE .L_IISIDE_\name\() /* Uses SRR0/1 not DAR/DSISR */ #define ICFAR .L_ICFAR_\name\() /* Uses CFAR */ #define ICFAR_IF_HVMODE .L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */ #define IDAR .L_IDAR_\name\() /* Uses DAR (or SRR0) */ #define IDSISR .L_IDSISR_\name\() /* Uses DSISR (or SRR1) */ #define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */ #define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */ #define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */ #define IKVM_REAL .L_IKVM_REAL_\name\() /* Real entry tests KVM */ #define __IKVM_REAL(name) .L_IKVM_REAL_ ## name #define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */ #define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */ #define __ISTACK(name) .L_ISTACK_ ## name #define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */ #define INT_DEFINE_BEGIN(n) \ .macro int_define_ ## n name #define INT_DEFINE_END(n) \ .endm ; \ int_define_ ## n n ; \ do_define_int n .macro do_define_int name .ifndef IVEC .error "IVEC not defined" .endif .ifndef IHSRR IHSRR=0 .endif .ifndef IHSRR_IF_HVMODE IHSRR_IF_HVMODE=0 .endif .ifndef IAREA IAREA=PACA_EXGEN .endif .ifndef IVIRT IVIRT=1 .endif .ifndef IISIDE IISIDE=0 .endif .ifndef ICFAR ICFAR=1 .endif .ifndef ICFAR_IF_HVMODE ICFAR_IF_HVMODE=0 .endif .ifndef IDAR IDAR=0 .endif .ifndef IDSISR IDSISR=0 .endif .ifndef IBRANCH_TO_COMMON IBRANCH_TO_COMMON=1 .endif .ifndef IREALMODE_COMMON IREALMODE_COMMON=0 .else .if ! IBRANCH_TO_COMMON .error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0" .endif .endif .ifndef IMASK IMASK=0 .endif .ifndef IKVM_REAL IKVM_REAL=0 .endif .ifndef IKVM_VIRT IKVM_VIRT=0 .endif .ifndef ISTACK ISTACK=1 .endif .ifndef IKUAP IKUAP=1 .endif .endm /* * All interrupts which set HSRR registers, as well as SRESET and MCE and * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken, * so they all generally need to test whether they were taken in guest context. * * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be * taken with MSR[HV]=0. * * Interrupts which set SRR registers (with the above exceptions) do not * elevate to MSR[HV]=1 mode, though most can be taken when running with * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do * not need to test whether a guest is running because they get delivered to * the guest directly, including nested HV KVM guests. * * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be * delivered to the real-mode entry point, therefore such interrupts only test * KVM in their real mode handlers, and only when PR KVM is possible. 
* * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always * delivered in real-mode when the MMU is in hash mode because the MMU * registers are not set appropriately to translate host addresses. In nested * radix mode these can be delivered in virt-mode as the host translations are * used implicitly (see: effective LPID, effective PID). */ /* * If an interrupt is taken while a guest is running, it is immediately routed * to KVM to handle. */ .macro KVMTEST name handler #ifdef CONFIG_KVM_BOOK3S_64_HANDLER lbz r10,HSTATE_IN_GUEST(r13) cmpwi r10,0 /* HSRR variants have the 0x2 bit added to their trap number */ .if IHSRR_IF_HVMODE BEGIN_FTR_SECTION li r10,(IVEC + 0x2) FTR_SECTION_ELSE li r10,(IVEC) ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) .elseif IHSRR li r10,(IVEC + 0x2) .else li r10,(IVEC) .endif bne \handler #endif .endm /* * This is the BOOK3S interrupt entry code macro. * * This can result in one of several things happening: * - Branch to the _common handler, relocated, in virtual mode. * These are normal interrupts (synchronous and asynchronous) handled by * the kernel. * - Branch to KVM, relocated but real mode interrupts remain in real mode. * These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by * / intended for host or guest kernel, but KVM must always be involved * because the machine state is set for guest execution. * - Branch to the masked handler, unrelocated. * These occur when maskable asynchronous interrupts are taken with the * irq_soft_mask set. * - Branch to an "early" handler in real mode but relocated. * This is done if early=1. MCE and HMI use these to handle errors in real * mode. * - Fall through and continue executing in real, unrelocated mode. * This is done if early=2. */ .macro GEN_BRANCH_TO_COMMON name, virt .if IREALMODE_COMMON LOAD_HANDLER(r10, \name\()_common) mtctr r10 bctr .else .if \virt #ifndef CONFIG_RELOCATABLE b \name\()_common_virt #else LOAD_HANDLER(r10, \name\()_common_virt) mtctr r10 bctr #endif .else LOAD_HANDLER(r10, \name\()_common_real) mtctr r10 bctr .endif .endif .endm .macro GEN_INT_ENTRY name, virt, ool=0 SET_SCRATCH0(r13) /* save r13 */ GET_PACA(r13) std r9,IAREA+EX_R9(r13) /* save r9 */ BEGIN_FTR_SECTION mfspr r9,SPRN_PPR END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) HMT_MEDIUM std r10,IAREA+EX_R10(r13) /* save r10 */ .if ICFAR BEGIN_FTR_SECTION mfspr r10,SPRN_CFAR END_FTR_SECTION_IFSET(CPU_FTR_CFAR) .elseif ICFAR_IF_HVMODE BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(69) mfspr r10,SPRN_CFAR END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69) FTR_SECTION_ELSE BEGIN_FTR_SECTION_NESTED(69) li r10,0 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69) ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) .endif .if \ool .if !\virt b tramp_real_\name .pushsection .text TRAMP_REAL_BEGIN(tramp_real_\name) .else b tramp_virt_\name .pushsection .text TRAMP_VIRT_BEGIN(tramp_virt_\name) .endif .endif BEGIN_FTR_SECTION std r9,IAREA+EX_PPR(r13) END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) .if ICFAR || ICFAR_IF_HVMODE BEGIN_FTR_SECTION std r10,IAREA+EX_CFAR(r13) END_FTR_SECTION_IFSET(CPU_FTR_CFAR) .endif INTERRUPT_TO_KERNEL mfctr r10 std r10,IAREA+EX_CTR(r13) mfcr r9 std r11,IAREA+EX_R11(r13) /* save r11 - r12 */ std r12,IAREA+EX_R12(r13) /* * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI], * because a d-side MCE will clobber those registers so is * not recoverable if they are live. 
*/ GET_SCRATCH0(r10) std r10,IAREA+EX_R13(r13) .if IDAR && !IISIDE .if IHSRR mfspr r10,SPRN_HDAR .else mfspr r10,SPRN_DAR .endif std r10,IAREA+EX_DAR(r13) .endif .if IDSISR && !IISIDE .if IHSRR mfspr r10,SPRN_HDSISR .else mfspr r10,SPRN_DSISR .endif stw r10,IAREA+EX_DSISR(r13) .endif .if IHSRR_IF_HVMODE BEGIN_FTR_SECTION mfspr r11,SPRN_HSRR0 /* save HSRR0 */ mfspr r12,SPRN_HSRR1 /* and HSRR1 */ FTR_SECTION_ELSE mfspr r11,SPRN_SRR0 /* save SRR0 */ mfspr r12,SPRN_SRR1 /* and SRR1 */ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) .elseif IHSRR mfspr r11,SPRN_HSRR0 /* save HSRR0 */ mfspr r12,SPRN_HSRR1 /* and HSRR1 */ .else mfspr r11,SPRN_SRR0 /* save SRR0 */ mfspr r12,SPRN_SRR1 /* and SRR1 */ .endif .if IBRANCH_TO_COMMON GEN_BRANCH_TO_COMMON \name \virt .endif .if \ool .popsection .endif .endm /* * __GEN_COMMON_ENTRY is required to receive the branch from interrupt * entry, except in the case of the real-mode handlers which require * __GEN_REALMODE_COMMON_ENTRY. * * This switches to virtual mode and sets MSR[RI]. */ .macro __GEN_COMMON_ENTRY name DEFINE_FIXED_SYMBOL(\name\()_common_real, text) \name\()_common_real: .if IKVM_REAL KVMTEST \name kvm_interrupt .endif ld r10,PACAKMSR(r13) /* get MSR value for kernel */ /* MSR[RI] is clear iff using SRR regs */ .if IHSRR_IF_HVMODE BEGIN_FTR_SECTION xori r10,r10,MSR_RI END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE) .elseif ! IHSRR xori r10,r10,MSR_RI .endif mtmsrd r10 .if IVIRT .if IKVM_VIRT b 1f /* skip the virt test coming from real */ .endif .balign IFETCH_ALIGN_BYTES DEFINE_FIXED_SYMBOL(\name\()_common_virt, text) \name\()_common_virt: .if IKVM_VIRT KVMTEST \name kvm_interrupt 1: .endif .endif /* IVIRT */ .endm /* * Don't switch to virt mode. Used for early MCE and HMI handlers that * want to run in real mode. */ .macro __GEN_REALMODE_COMMON_ENTRY name DEFINE_FIXED_SYMBOL(\name\()_common_real, text) \name\()_common_real: .if IKVM_REAL KVMTEST \name kvm_interrupt .endif .endm .macro __GEN_COMMON_BODY name .if IMASK .if ! ISTACK .error "No support for masked interrupt to use custom stack" .endif /* If coming from user, skip soft-mask tests. */ andi. r10,r12,MSR_PR bne 3f /* * Kernel code running below __end_soft_masked may be * implicitly soft-masked if it is within the regions * in the soft mask table. */ LOAD_HANDLER(r10, __end_soft_masked) cmpld r11,r10 bge+ 1f /* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */ mtctr r12 stw r9,PACA_EXGEN+EX_CCR(r13) SEARCH_SOFT_MASK_TABLE cmpdi r12,0 mfctr r12 /* Restore r12 to SRR1 */ lwz r9,PACA_EXGEN+EX_CCR(r13) beq 1f /* Not in soft-mask table */ li r10,IMASK b 2f /* In soft-mask table, always mask */ /* Test the soft mask state against our interrupt's bit */ 1: lbz r10,PACAIRQSOFTMASK(r13) 2: andi. r10,r10,IMASK /* Associate vector numbers with bits in paca->irq_happened */ .if IVEC == 0x500 || IVEC == 0xea0 li r10,PACA_IRQ_EE .elseif IVEC == 0x900 li r10,PACA_IRQ_DEC .elseif IVEC == 0xa00 || IVEC == 0xe80 li r10,PACA_IRQ_DBELL .elseif IVEC == 0xe60 li r10,PACA_IRQ_HMI .elseif IVEC == 0xf00 li r10,PACA_IRQ_PMI .else .abort "Bad maskable vector" .endif .if IHSRR_IF_HVMODE BEGIN_FTR_SECTION bne masked_Hinterrupt FTR_SECTION_ELSE bne masked_interrupt ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) .elseif IHSRR bne masked_Hinterrupt .else bne masked_interrupt .endif .endif .if ISTACK andi. 
r10,r12,MSR_PR /* See if coming from user */ 3: mr r10,r1 /* Save r1 */ subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */ beq- 100f ld r1,PACAKSAVE(r13) /* kernel stack to use */ 100: tdgei r1,-INT_FRAME_SIZE /* trap if r1 is in userspace */ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0 .endif std r9,_CCR(r1) /* save CR in stackframe */ std r11,_NIP(r1) /* save SRR0 in stackframe */ std r12,_MSR(r1) /* save SRR1 in stackframe */ std r10,0(r1) /* make stack chain pointer */ std r0,GPR0(r1) /* save r0 in stackframe */ std r10,GPR1(r1) /* save r1 in stackframe */ /* Mark our [H]SRRs valid for return */ li r10,1 .if IHSRR_IF_HVMODE BEGIN_FTR_SECTION stb r10,PACAHSRR_VALID(r13) FTR_SECTION_ELSE stb r10,PACASRR_VALID(r13) ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) .elseif IHSRR stb r10,PACAHSRR_VALID(r13) .else stb r10,PACASRR_VALID(r13) .endif .if ISTACK .if IKUAP kuap_save_amr_and_lock r9, r10, cr1, cr0 .endif beq 101f /* if from kernel mode */ BEGIN_FTR_SECTION ld r9,IAREA+EX_PPR(r13) /* Read PPR from paca */ std r9,_PPR(r1) END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 101: .else .if IKUAP kuap_save_amr_and_lock r9, r10, cr1 .endif .endif /* Save original regs values from save area to stack frame. */ ld r9,IAREA+EX_R9(r13) /* move r9, r10 to stackframe */ ld r10,IAREA+EX_R10(r13) std r9,GPR9(r1) std r10,GPR10(r1) ld r9,IAREA+EX_R11(r13) /* move r11 - r13 to stackframe */ ld r10,IAREA+EX_R12(r13) ld r11,IAREA+EX_R13(r13) std r9,GPR11(r1) std r10,GPR12(r1) std r11,GPR13(r1) SAVE_NVGPRS(r1) .if IDAR .if IISIDE ld r10,_NIP(r1) .else ld r10,IAREA+EX_DAR(r13) .endif std r10,_DAR(r1) .endif .if IDSISR .if IISIDE ld r10,_MSR(r1) lis r11,DSISR_SRR1_MATCH_64S@h and r10,r10,r11 .else lwz r10,IAREA+EX_DSISR(r13) .endif std r10,_DSISR(r1) .endif BEGIN_FTR_SECTION .if ICFAR || ICFAR_IF_HVMODE ld r10,IAREA+EX_CFAR(r13) .else li r10,0 .endif std r10,ORIG_GPR3(r1) END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r10,IAREA+EX_CTR(r13) std r10,_CTR(r1) std r2,GPR2(r1) /* save r2 in stackframe */ SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */ mflr r9 /* Get LR, later save to stack */ LOAD_PACA_TOC() /* get kernel TOC into r2 */ std r9,_LINK(r1) lbz r10,PACAIRQSOFTMASK(r13) mfspr r11,SPRN_XER /* save XER in stackframe */ std r10,SOFTE(r1) std r11,_XER(r1) li r9,IVEC std r9,_TRAP(r1) /* set trap number */ li r10,0 LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) std r10,RESULT(r1) /* clear regs->result */ std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ .endm /* * On entry r13 points to the paca, r9-r13 are saved in the paca, * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and * SRR1, and relocation is on. * * If stack=0, then the stack is already set in r1, and r1 is saved in r10. * PPR save and CPU accounting is not done for the !stack case (XXX why not?) 
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	LOAD_PACA_TOC()
	LOAD_REG_ADDR(r9, __start___restart_table)
	LOAD_REG_ADDR(r10, __stop___restart_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	ld	r12,16(r9)
	b	303f
301:
	addi	r9,r9,24
	b	300b
302:
	li	r12,0
303:
.endm

.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	LOAD_PACA_TOC()
	LOAD_REG_ADDR(r9, __start___soft_mask_table)
	LOAD_REG_ADDR(r10, __stop___soft_mask_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	li	r12,1
	b	303f
301:
	addi	r9,r9,16
	b	300b
302:
	li	r12,0
303:
.endm

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	li	r10,0
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	stb	r10,PACAHSRR_VALID(r13)
	.else
	mtspr	SPRN_SRR1,r9
	stb	r10,PACASRR_VALID(r13)
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_GPRS(2, 13, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm

/*
 * EARLY_BOOT_FIXUP - Fix real-mode interrupt with wrong endian in early boot.
 *
 * There's a short window during boot where although the kernel is running
 * little endian, any exceptions will cause the CPU to switch back to big
 * endian. For example a WARN() boils down to a trap instruction, which will
 * cause a program check, and we end up here but with the CPU in big endian
 * mode. The first instruction of the program check handler (in GEN_INT_ENTRY
 * below) is an mtsprg, which when executed in the wrong endian is an lhzu with
 * a ~3GB displacement from r3. The content of r3 is random, so that is a load
 * from some random location, and depending on the system can easily lead to a
 * checkstop, or an infinitely recursive page fault.
 *
 * So to handle that case we have a trampoline here that can detect we are in
 * the wrong endian and flip us back to the correct endian. We can't flip
 * MSR[LE] using mtmsr, so we have to use rfid. That requires backing up SRR0/1
 * as well as a GPR. To do that we use SPRG0/2/3, as SPRG1 is already used for
 * the paca. SPRG3 is user readable, but this trampoline is only active very
 * early in boot, and SPRG3 will be reinitialised in vdso_getcpu_init() before
 * userspace starts.
 */
.macro EARLY_BOOT_FIXUP
BEGIN_FTR_SECTION
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	tdi	0,0,0x48	// Trap never, or in reverse endian: b . + 8
	b	2f		// Skip trampoline if endian is correct
	.long 0xa643707d	// mtsprg	0, r11	Backup r11
	.long 0xa6027a7d	// mfsrr0	r11
	.long 0xa643727d	// mtsprg	2, r11	Backup SRR0 in SPRG2
	.long 0xa6027b7d	// mfsrr1	r11
	.long 0xa643737d	// mtsprg	3, r11	Backup SRR1 in SPRG3
	.long 0xa600607d	// mfmsr	r11
	.long 0x01006b69	// xori		r11, r11, 1	Invert MSR[LE]
	.long 0xa6037b7d	// mtsrr1	r11
	/*
	 * This is 'li r11,1f' where 1f is the absolute address of that
	 * label, byteswapped into the SI field of the instruction.
	 */
	.long 0x00006039 | \
		((ABS_ADDR(1f, real_vectors) & 0x00ff) << 24) | \
		((ABS_ADDR(1f, real_vectors) & 0xff00) << 8)
	.long 0xa6037a7d	// mtsrr0	r11
	.long 0x2400004c	// rfid
1:
	mfsprg r11, 3
	mtsrr1 r11		// Restore SRR1
	mfsprg r11, 2
	mtsrr0 r11		// Restore SRR0
	mfsprg r11, 0		// Restore r11
2:
#endif
	/*
	 * program check could hit at any time, and pseries can not block
	 * MSR[ME] in early boot. So check if there is anything useful in r13
	 * yet, and spin forever if not.
	 */
	mtsprg	0, r11
	mfcr	r11
	cmpdi	r13, 0
	beq	.
	mtcr	r11
	mfsprg	r11, 0
END_FTR_SECTION(0, 1)	// nop out after boot
.endm

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * The scv instructions are a special case. They get a 0x3000 offset applied.
 * scv exceptions have unique reentrancy properties, see below.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x2fff : Real mode trampolines
 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,        0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,    0x1900, 0x3000)
OPEN_FIXED_SECTION(virt_vectors,        0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,    0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,          0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/**
 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 * This is a synchronous interrupt invoked with the "scv" instruction. The
 * system call does not alter the HV bit, so it is directed to the OS.
 *
 * Handling:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses in the handler as being soft-masked,
 * by adding a SOFT_MASK_TABLE entry for them.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
 * Before leaving the following inside-__end_soft_masked text, at least one
 * of the following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
 * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc)
 *
 * KVM:
 * These interrupts do not elevate HV 0->1, so HV is not involved. PR KVM
 * ensures that FSCR[SCV] is disabled whenever it has to force AIL off.
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 */
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_tramp
#else
	b	system_call_vectored_common
#endif
	nop

	/* SCV 1 - 127 */
	.rept	127
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	li	r0,-1 /* cause failure */
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_sigill_tramp
#else
	b	system_call_vectored_sigill
#endif
	.endr
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines)
	mtctr	r10
	bctr

TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines)
	mtctr	r10
	bctr
#endif


/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - As crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register set up. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
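/*
 * Note: system_reset below is defined with IAREA=PACA_EXNMI and ISTACK=0,
 * so entry does not touch the regular kernel stack or the PACA_EXGEN save
 * area; system_reset_common instead switches to the dedicated
 * PACA_NMI_EMERG_SP stack before building its frame.
 */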
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	ISTACK=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	__LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines)
	mtctr	r12
	bctr
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi. When the interrupt entry wrapper later
	 * enables MSR_RI, then SLB or MCE will be able to recover, but a
	 * nested NMI will notice in_nmi and not recover because of the use
	 * of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL


/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area,
 * the difference is re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such
 *   issues, but it has different priorities). Check to see if the CPU was in
 *   power save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early
 *   handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	ISTACK=0
	IDAR=1
	IDSISR=1
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	EARLY_BOOT_FIXUP
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif
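/*
 * Note: MACHINE_CHECK_HANDLER_WINDUP below is shared by both exits from the
 * early handler — the queue-event-and-return-to-kernel path and the
 * mce_deliver path that re-enters via the late machine_check entry.
 */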
#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r9,0;					\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Decrement paca->in_mce now RI is clear. */	\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	EXCEPTION_RESTORE_REGS

EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early

	/*
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)

	mr	r10,r1			/* Save r1 */
	bne	1f
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
1:	/* Limit nested MCE to level 4 to avoid stack overflow */
	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY machine_check_early

BEGIN_FTR_SECTION
	bl	enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_FTR_SECTION
	bl	machine_check_early_boot
END_FTR_SECTION(0, 1)     // nop out after boot
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * Check if we are coming from guest. If yes, then run the normal
	 * exception handler which will take the
	 * machine_check_kvm->kvm_interrupt branch to deliver the MC event
	 * to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	mce_deliver		/* continue if we are. */
#endif

	/*
	 * Check if we are coming from userspace. If yes, then run the normal
	 * exception handler which will deliver the MC event to this kernel.
	 */
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	mce_deliver		/* continue in V mode if we are. */

	/*
	 * At this point we are coming from kernel context.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	beq	unrecoverable_mce

	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */
	beq	unrecoverable_mce /* if !handled then panic */
	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL

mce_deliver:
	/*
	 * This is a host user or guest MCE. Restore all registers, then
	 * run the "late" handler. For host user, this will run the
	 * machine_check_exception handler in virtual mode like a normal
	 * interrupt handler. For guest, this will trigger the KVM test
	 * and branch to the KVM interrupt similarly to other interrupts.
	 */
BEGIN_FTR_SECTION
	ld	r10,ORIG_GPR3(r1)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	GEN_INT_ENTRY machine_check, virt=0

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	GEN_COMMON machine_check
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception_async
	b	interrupt_return_srr

#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * GPR-loss wakeups are relatively straightforward, because the
	 * idle sleep code has saved all non-volatile registers on its
	 * own stack, and r1 in PACAR1.
	 *
	 * For no-loss wakeups the r1 and lr registers used by the
	 * early machine check handler have to be restored first. r2 is
	 * the kernel TOC, so no need to restore it.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)
	ld	r1,GPR1(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	b	idle_return_gpr_loss
#endif

EXC_COMMON_BEGIN(unrecoverable_mce)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
BEGIN_FTR_SECTION
	li	r10,0 /* clear MSR_RI */
	mtmsrd	r10,1
	bl	disable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r10,PACAKMSR(r13)
	li	r3,MSR_ME
	andc	r10,r10,r3
	mtmsrd	r10

	lhz	r12,PACA_IN_MCE(r13)
	subi	r12,r12,1
	sth	r12,PACA_IN_MCE(r13)

	/*
	 * Invoke machine_check_exception to print MCE event and panic.
	 * This is the NMI version of the handler because we are called from
	 * the early handler which is a true NMI.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception

	/*
	 * We will not reach here. Even if we did, there is no way out.
	 * Call unrecoverable_exception and die.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.


/**
 * Interrupt 0x300 - Data Storage Interrupt (DSI).
 * This is a synchronous interrupt generated due to a data access exception,
 * e.g., a load or store which does not have a valid page table entry with
 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
 *
 * Handling:
 * - Hash MMU
 *   Go to do_hash_fault, which attempts to fill the HPT from an entry in the
 *   Linux page table. Hash faults can hit in kernel mode in a fairly
 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
 *   backed by Linux page table entries.
 *
 *   If no entry is found the Linux page fault handler is invoked (by
 *   do_hash_fault). Linux page faults can happen in kernel mode due to user
 *   copy operations of course.
 *
 *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
 *   MMU context, which may cause a DSI in the host, which must go to the
 *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
 *   always be used regardless of AIL setting.
 *
 * - Radix MMU
 *   The hardware loads from the Linux page table directly, so a fault goes
 *   immediately to Linux page fault.
 *
 * Conditions like DAWR match are handled on the way in to Linux page fault.
 */
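/*
 * Note: data_access_common below tests DSISR[DABRMATCH] before the normal
 * fault path and routes breakpoint hits to do_break instead, restoring
 * NVGPRS afterwards in case do_break changed them.
 */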
INT_DEFINE_BEGIN(data_access)
	IVEC=0x300
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access)

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
	GEN_COMMON data_access
	ld	r4,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	1f
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
	b	interrupt_return_srr

1:	bl	do_break
	/*
	 * do_break() may have changed the NV GPRS while handling a breakpoint.
	 * If so, we need to restore them with their updated values.
	 */
	REST_NVGPRS(r1)
	b	interrupt_return_srr


/**
 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
 * This is a synchronous interrupt in response to an MMU fault missing SLB
 * entry for HPT, or an address outside RPT translation range.
 *
 * Handling:
 * - HPT:
 *   This refills the SLB, or reports an access fault similarly to a bad page
 *   fault. When coming from user-mode, the SLB handler may access any kernel
 *   data, though it may itself take a DSLB. When coming from kernel mode,
 *   recursive faults must be avoided so access is restricted to the kernel
 *   image text/data, kernel stack, and any data allocated below
 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 *   on user-handler data structures.
 *
 *   KVM: Same as 0x300, DSLB must test for KVM guest.
 */
INT_DEFINE_BEGIN(data_access_slb)
	IVEC=0x380
	IDAR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access_slb)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
	GEN_COMMON data_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_segment_interrupt
	b	interrupt_return_srr


/**
 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSI, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
 */
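/*
 * Note: IISIDE=1 below tells __GEN_COMMON_BODY to source the saved DAR from
 * regs->nip and the saved DSISR from regs->msr (masked with
 * DSISR_SRR1_MATCH_64S), matching the SRR0/SRR1 locations described above.
 */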
INT_DEFINE_BEGIN(instruction_access)
	IVEC=0x400
	IISIDE=1
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)

EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
	GEN_COMMON instruction_access
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	do_page_fault
#endif
	b	interrupt_return_srr


/**
 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSLB, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR).
 */
INT_DEFINE_BEGIN(instruction_access_slb)
	IVEC=0x480
	IISIDE=1
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
	GEN_COMMON instruction_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_segment_interrupt
	b	interrupt_return_srr


/**
 * Interrupt 0x500 - External Interrupt.
 * This is an asynchronous maskable interrupt in response to an "external
 * exception" from the interrupt controller or hypervisor (e.g., device
 * interrupt). It is maskable in hardware by clearing MSR[EE], and
 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
 * interrupts are delivered with HSRR registers, guests use SRRs, which
 * requires IHSRR_IF_HVMODE.
 *
 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
 * external interrupts are delivered as Hypervisor Virtualization Interrupts
 * rather than External Interrupts.
 *
 * Handling:
 * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead,
 * because registers at the time of the interrupt are not so important as it is
 * asynchronous.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 *
 * CFAR is not required because this is an asynchronous interrupt that in
 * general won't have much bearing on the state of the CPU, with the possible
 * exception of crash/debug IPIs, but those are generally moving to use SRESET
 * IPIs. Unless this is an HV interrupt and KVM HV is possible, in which case
 * it may be exiting the guest and need CFAR to be saved.
 */
INT_DEFINE_BEGIN(hardware_interrupt)
	IVEC=0x500
	IHSRR_IF_HVMODE=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
	ICFAR=0
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ICFAR_IF_HVMODE=1
#endif
INT_DEFINE_END(hardware_interrupt)

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
	GEN_COMMON hardware_interrupt
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_IRQ
	BEGIN_FTR_SECTION
	b	interrupt_return_hsrr
	FTR_SECTION_ELSE
	b	interrupt_return_srr
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)


/**
 * Interrupt 0x600 - Alignment Interrupt
 * This is a synchronous interrupt in response to data alignment fault.
 */
INT_DEFINE_BEGIN(alignment)
	IVEC=0x600
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(alignment)

EXC_REAL_BEGIN(alignment, 0x600, 0x100)
	GEN_INT_ENTRY alignment, virt=0
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
	GEN_INT_ENTRY alignment, virt=1
EXC_VIRT_END(alignment, 0x4600, 0x100)
EXC_COMMON_BEGIN(alignment_common)
	GEN_COMMON alignment
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_srr


/**
 * Interrupt 0x700 - Program Interrupt (program check).
 * This is a synchronous interrupt in response to various instruction faults:
 * traps, privilege errors, TM errors, floating point exceptions.
 *
 * Handling:
 * This interrupt may use the "emergency stack" in some cases when being taken
 * from kernel context, which complicates handling.
 */
INT_DEFINE_BEGIN(program_check)
	IVEC=0x700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(program_check)

EXC_REAL_BEGIN(program_check, 0x700, 0x100)
	EARLY_BOOT_FIXUP
	GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
	GEN_INT_ENTRY program_check, virt=1
EXC_VIRT_END(program_check, 0x4700, 0x100)
EXC_COMMON_BEGIN(program_check_common)
	__GEN_COMMON_ENTRY program_check

	/*
	 * It's possible to receive a TM Bad Thing type program check with
	 * userspace register values (in particular r1), but with SRR1
	 * reporting that we came from the kernel. Normally that would confuse
	 * the bad stack logic, and we would report a bad kernel stack pointer.
	 * Instead we switch to the emergency stack if we're taking a TM Bad
	 * Thing from the kernel.
	 */

	andi.	r10,r12,MSR_PR
	bne	.Lnormal_stack		/* If userspace, go normal path */

	andis.	r10,r12,(SRR1_PROGTM)@h
	bne	.Lemergency_stack	/* If TM, emergency */

	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace */
	blt	.Lnormal_stack		/* normal path if not */

	/* Use the emergency stack */
.Lemergency_stack:
	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label */
					/* 3 in EXCEPTION_PROLOG_COMMON	*/
	mr	r10,r1			/* Save r1 */
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	__ISTACK(program_check)=0
	__GEN_COMMON_BODY program_check
	b .Ldo_program_check

.Lnormal_stack:
	__ISTACK(program_check)=1
	__GEN_COMMON_BODY program_check

.Ldo_program_check:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_srr
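/*
 * Note the __ISTACK(program_check) reassignments above: they override the
 * interrupt's ISTACK flag around each __GEN_COMMON_BODY expansion, so the
 * emergency-stack path skips the usual kernel-stack switch while the normal
 * path keeps it.
 */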
/**
 * Interrupt 0x800 - Floating-Point Unavailable Interrupt.
 * This is a synchronous interrupt in response to executing an fp instruction
 * with MSR[FP]=0.
 *
 * Handling:
 * This will load FP registers and enable the FP bit if coming from userspace,
 * otherwise report a bad kernel use of FP.
 */
INT_DEFINE_BEGIN(fp_unavailable)
	IVEC=0x800
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(fp_unavailable)

EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
	GEN_INT_ENTRY fp_unavailable, virt=0
EXC_REAL_END(fp_unavailable, 0x800, 0x100)
EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
	GEN_INT_ENTRY fp_unavailable, virt=1
EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
EXC_COMMON_BEGIN(fp_unavailable_common)
	GEN_COMMON fp_unavailable
	bne	1f			/* if from user, just load it up */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	interrupt_return_srr
#endif


/**
 * Interrupt 0x900 - Decrementer Interrupt.
 * This is an asynchronous interrupt in response to a decrementer exception
 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
 * local_irq_disable()).
 *
 * Handling:
 * This calls into Linux timer handler. NVGPRs are not saved (see 0x500).
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
 * in the interrupted context.
 * If PPC_WATCHDOG is configured, the soft masked handler will actually set
 * things back up to run soft_nmi_interrupt as a regular interrupt handler
 * on the emergency stack.
 *
 * CFAR is not required because this is asynchronous (see hardware_interrupt).
 * A watchdog interrupt may like to have CFAR, but usually the interesting
 * branch is long gone by that point (e.g., infinite loop).
 */
INT_DEFINE_BEGIN(decrementer)
	IVEC=0x900
	IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
	ICFAR=0
INT_DEFINE_END(decrementer)

EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
	GEN_INT_ENTRY decrementer, virt=0
EXC_REAL_END(decrementer, 0x900, 0x80)
EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
	GEN_INT_ENTRY decrementer, virt=1
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
	GEN_COMMON decrementer
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	timer_interrupt
	b	interrupt_return_srr


/**
 * Interrupt 0x980 - Hypervisor Decrementer Interrupt.
 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC
 * register.
 *
 * Handling:
 * Linux does not use this outside KVM where it's used to keep a host timer
 * while the guest is given control of DEC. It should normally be caught by
 * the KVM test and routed there.
 */
INT_DEFINE_BEGIN(hdecrementer)
	IVEC=0x980
	IHSRR=1
	ISTACK=0
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(hdecrementer)

EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
	GEN_INT_ENTRY hdecrementer, virt=0
EXC_REAL_END(hdecrementer, 0x980, 0x80)
EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
	GEN_INT_ENTRY hdecrementer, virt=1
EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
EXC_COMMON_BEGIN(hdecrementer_common)
	__GEN_COMMON_ENTRY hdecrementer
	/*
	 * Hypervisor decrementer interrupts not caught by the KVM test
	 * shouldn't occur but are sometimes left pending on exit from a KVM
	 * guest.  We don't need to do anything to clear them, as they are
	 * edge-triggered.
	 *
	 * Be careful to avoid touching the kernel stack.
	 */
	li	r10,0
	stb	r10,PACAHSRR_VALID(r13)
	ld	r10,PACA_EXGEN+EX_CTR(r13)
	mtctr	r10
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFI_TO_KERNEL


/**
 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
 * This is an asynchronous interrupt in response to a msgsndp doorbell.
 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
 * IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * Handling:
 * Guests may use this for IPIs between threads in a core if the
 * hypervisor supports it. NVGPRS are not saved (see 0x500).
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, leaving MSR[EE] enabled in the interrupted context because the
 * doorbells are edge triggered.
 *
 * CFAR is not required, similarly to hardware_interrupt.
 */
INT_DEFINE_BEGIN(doorbell_super)
	IVEC=0xa00
	IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
	ICFAR=0
INT_DEFINE_END(doorbell_super)

EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
	GEN_INT_ENTRY doorbell_super, virt=0
EXC_REAL_END(doorbell_super, 0xa00, 0x100)
EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
	GEN_INT_ENTRY doorbell_super, virt=1
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
	GEN_COMMON doorbell_super
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
	bl	doorbell_exception
#else
	bl	unknown_async_exception
#endif
	b	interrupt_return_srr


EXC_REAL_NONE(0xb00, 0x100)
EXC_VIRT_NONE(0x4b00, 0x100)

/**
 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
 * This is a synchronous interrupt invoked with the "sc" instruction. The
 * system call is invoked with "sc 0" and does not alter the HV bit, so it
 * is directed to the currently running OS. The hypercall is invoked with
 * "sc 1" and it sets HV=1, so it elevates to hypervisor.
 *
 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
 * 0x4c00 virtual mode.
 *
 * Handling:
 * If the KVM test fires then it was due to a hypercall and is accordingly
 * routed to KVM. Otherwise this executes a normal Linux system call.
 *
 * Call convention:
 *
 * syscall and hypercalls register conventions are documented in
 * Documentation/powerpc/syscall64-abi.rst and
 * Documentation/powerpc/papr_hcalls.rst respectively.
 *
 * The intersection of volatile registers that don't contain possible
 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 * without saving, though xer is not a good idea to use, as hardware may
 * interpret some bits so it may be costly to change them.
 */
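/*
 * As the SYSTEM_CALL macro below notes, the common handlers are reached
 * with the PACA in r13, the user r13 in r9, and SRR0/SRR1 in r11/r12 (ctr
 * is additionally clobbered on the KVM_BOOK3S_64_HANDLER path, which parks
 * r13 there during the hcall test).
 */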
INT_DEFINE_BEGIN(system_call)
	IVEC=0xc00
	IKVM_REAL=1
	IKVM_VIRT=1
	ICFAR=0
INT_DEFINE_END(system_call)

.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * There is a little bit of juggling to get syscall and hcall
	 * working well. Save r13 in ctr to avoid using SPRG scratch
	 * register.
	 *
	 * Userspace syscalls have already saved the PPR, hcalls must save
	 * it before setting HMT_MEDIUM.
	 */
	mtctr	r13
	GET_PACA(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	INTERRUPT_TO_KERNEL
	KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
	mfctr	r9
#else
	mr	r9,r13
	GET_PACA(r13)
	INTERRUPT_TO_KERNEL
#endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
#endif

	/* We reach here with PACA in r13, r13 in r9. */
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1

	HMT_MEDIUM

	.if ! \virt
	__LOAD_HANDLER(r10, system_call_common_real, real_vectors)
	mtctr	r10
	bctr
	.else
#ifdef CONFIG_RELOCATABLE
	__LOAD_HANDLER(r10, system_call_common, virt_vectors)
	mtctr	r10
	bctr
#else
	b	system_call_common
#endif
	.endif

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
	/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	mr	r13,r9
	RFI_TO_USER	/* return to userspace */
	b	.	/* prevent speculative execution */
#endif
.endm

EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
	SYSTEM_CALL 0
EXC_REAL_END(system_call, 0xc00, 0x100)
EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
	SYSTEM_CALL 1
EXC_VIRT_END(system_call, 0x4c00, 0x100)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvm_hcall)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfcr	r9
	mfctr	r10
	std	r10,PACA_EXGEN+EX_R13(r13)
	li	r10,0
	std	r10,PACA_EXGEN+EX_CFAR(r13)
	std	r10,PACA_EXGEN+EX_CTR(r13)
	/*
	 * Save the PPR (on systems that support it) before changing to
	 * HMT_MEDIUM. That allows the KVM code to save that value into the
	 * guest state (it is the guest's PPR value).
	 */
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_PPR
	std	r10,PACA_EXGEN+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	HMT_MEDIUM

#ifdef CONFIG_RELOCATABLE
	/*
	 * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
	 * outside the head section.
	 */
	__LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines)
	mtctr	r10
	bctr
#else
	b	kvmppc_hcall
#endif
#endif

/**
 * Interrupt 0xd00 - Trace Interrupt.
 * This is a synchronous interrupt in response to instruction step or
 * breakpoint faults.
 */
INT_DEFINE_BEGIN(single_step)
	IVEC=0xd00
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(single_step)

EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
	GEN_INT_ENTRY single_step, virt=0
EXC_REAL_END(single_step, 0xd00, 0x100)
EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
	GEN_INT_ENTRY single_step, virt=1
EXC_VIRT_END(single_step, 0x4d00, 0x100)
EXC_COMMON_BEGIN(single_step_common)
	GEN_COMMON single_step
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	single_step_exception
	b	interrupt_return_srr


/**
 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
 * This is a synchronous interrupt in response to an MMU fault caused by a
 * guest data access.
 *
 * Handling:
 * This should always get routed to KVM. In radix MMU mode, this is caused
 * by a guest nested radix access that can't be performed due to the
 * partition scope page table. In hash mode, this can be caused by guests
 * running with translation disabled (virtual real mode) or with VPM enabled.
 * KVM will update the page table structures or disallow the access.
 */
INT_DEFINE_BEGIN(h_data_storage)
	IVEC=0xe00
	IHSRR=1
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(h_data_storage)

EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
	GEN_INT_ENTRY h_data_storage, virt=0, ool=1
EXC_REAL_END(h_data_storage, 0xe00, 0x20)
EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
	GEN_INT_ENTRY h_data_storage, virt=1, ool=1
EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
EXC_COMMON_BEGIN(h_data_storage_common)
	GEN_COMMON h_data_storage
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	bl	do_bad_page_fault_segv
MMU_FTR_SECTION_ELSE
	bl	unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
	b	interrupt_return_hsrr


/**
 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
 * This is a synchronous interrupt in response to an MMU fault caused by a
 * guest instruction fetch, similar to HDSI.
 */
INT_DEFINE_BEGIN(h_instr_storage)
	IVEC=0xe20
	IHSRR=1
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(h_instr_storage)

EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
	GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
	GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
EXC_COMMON_BEGIN(h_instr_storage_common)
	GEN_COMMON h_instr_storage
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	interrupt_return_hsrr


/**
 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
 */
INT_DEFINE_BEGIN(emulation_assist)
	IVEC=0xe40
	IHSRR=1
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(emulation_assist)

EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
	GEN_INT_ENTRY emulation_assist, virt=0, ool=1
EXC_REAL_END(emulation_assist, 0xe40, 0x20)
EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
	GEN_INT_ENTRY emulation_assist, virt=1, ool=1
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
EXC_COMMON_BEGIN(emulation_assist_common)
	GEN_COMMON emulation_assist
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	emulation_assist_interrupt
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_hsrr


/**
 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
 * Exception. It is always taken in real mode but uses HSRR registers
 * unlike SRESET and MCE.
 *
 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * Handling:
 * This is a special case, this is handled similarly to machine checks, with an
 * initial real mode handler that is not soft-masked, which attempts to fix the
 * problem. Then a regular handler which is soft-maskable and reports the
 * problem.
 *
 * The emergency stack is used for the early real mode handler.
 *
 * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
 * either use soft-masking for the MCE, or use irq_work for the HMI.
 *
 * KVM:
 * Unlike MCE, this calls into KVM without calling the real mode handler
 * first.
 */
INT_DEFINE_BEGIN(hmi_exception_early)
	IVEC=0xe60
	IHSRR=1
	IREALMODE_COMMON=1
	ISTACK=0
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
	IKVM_REAL=1
INT_DEFINE_END(hmi_exception_early)

INT_DEFINE_BEGIN(hmi_exception)
	IVEC=0xe60
	IHSRR=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
INT_DEFINE_END(hmi_exception)

EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
	GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)

EXC_COMMON_BEGIN(hmi_exception_early_common)
	__GEN_REALMODE_COMMON_ENTRY hmi_exception_early

	mr	r10,r1			/* Save r1 */
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack for realmode */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY hmi_exception_early

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	hmi_exception_realmode
	cmpdi	cr0,r3,0
	bne	1f

	EXCEPTION_RESTORE_REGS hsrr=1
	HRFI_TO_USER_OR_KERNEL

1:
	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	EXCEPTION_RESTORE_REGS hsrr=1
	GEN_INT_ENTRY hmi_exception, virt=0

EXC_COMMON_BEGIN(hmi_exception_common)
	GEN_COMMON hmi_exception
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	handle_hmi_exception
	b	interrupt_return_hsrr


/**
 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
 * This is an asynchronous interrupt in response to a msgsnd doorbell.
 * Similar to the 0xa00 doorbell but for host rather than guest.
 *
 * CFAR is not required (similar to doorbell_interrupt), unless KVM HV
 * is enabled, in which case it may be a guest exit. Most PowerNV kernels
 * include KVM support so it would be nice if this could be dynamically
 * patched out if KVM was not currently running any guests.
 */
INT_DEFINE_BEGIN(h_doorbell)
	IVEC=0xe80
	IHSRR=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ICFAR=0
#endif
INT_DEFINE_END(h_doorbell)

EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
	GEN_INT_ENTRY h_doorbell, virt=0, ool=1
EXC_REAL_END(h_doorbell, 0xe80, 0x20)
EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
	GEN_INT_ENTRY h_doorbell, virt=1, ool=1
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
	GEN_COMMON h_doorbell
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
	bl	doorbell_exception
#else
	bl	unknown_async_exception
#endif
	b	interrupt_return_hsrr


/**
 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
 * This is an asynchronous interrupt in response to an "external exception".
 * Similar to 0x500 but for host only.
 *
 * Like h_doorbell, CFAR is only required for KVM HV because this can be
 * a guest exit.
 */
INT_DEFINE_BEGIN(h_virt_irq)
	IVEC=0xea0
	IHSRR=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ICFAR=0
#endif
INT_DEFINE_END(h_virt_irq)

EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
	GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
	GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
	GEN_COMMON h_virt_irq
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_IRQ
	b	interrupt_return_hsrr


EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)

/*
 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
 * This is an asynchronous interrupt in response to a PMU exception.
 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
 *
 * Handling:
 * This calls into the perf subsystem.
 *
 * Like the watchdog soft-nmi, it appears as an NMI interrupt to Linux, in
 * that it runs under local_irq_disable. However it may be soft-masked in
 * powerpc-specific code.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 *
 * CFAR is not used by perf interrupts so not required.
 */
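/*
 * Note: performance_monitor_common below re-checks PACAIRQSOFTMASK at run
 * time — if irqs were fully enabled the PMI is handled as a normal async
 * interrupt, otherwise it takes the NMI-style path and returns via
 * EXCEPTION_RESTORE_REGS rather than the usual interrupt return.
 */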
INT_DEFINE_BEGIN(performance_monitor)
	IVEC=0xf00
	IMASK=IRQS_PMI_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
	ICFAR=0
INT_DEFINE_END(performance_monitor)

EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
	GEN_INT_ENTRY performance_monitor, virt=0, ool=1
EXC_REAL_END(performance_monitor, 0xf00, 0x20)
EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
	GEN_INT_ENTRY performance_monitor, virt=1, ool=1
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
	GEN_COMMON performance_monitor
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lbz	r4,PACAIRQSOFTMASK(r13)
	cmpdi	r4,IRQS_ENABLED
	bne	1f
	bl	performance_monitor_exception_async
	b	interrupt_return_srr
1:
	bl	performance_monitor_exception_nmi
	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	kuap_kernel_restore r9, r10

	EXCEPTION_RESTORE_REGS hsrr=0
	RFI_TO_KERNEL


/**
 * Interrupt 0xf20 - Vector Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing a vector (or altivec) instruction with MSR[VEC]=0.
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(altivec_unavailable)
	IVEC=0xf20
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(altivec_unavailable)

EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
	GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
	GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
	GEN_COMMON altivec_unavailable
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	interrupt_return_srr


/**
 * Interrupt 0xf40 - VSX Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing a VSX instruction with MSR[VSX]=0.
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(vsx_unavailable)
	IVEC=0xf40
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(vsx_unavailable)

EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
	GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
	GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
EXC_COMMON_BEGIN(vsx_unavailable_common)
	GEN_COMMON vsx_unavailable
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	interrupt_return_srr


/**
 * Interrupt 0xf60 - Facility Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing an instruction without access to the facility that can be
 * resolved by the OS (e.g., FSCR, MSR).
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(facility_unavailable)
	IVEC=0xf60
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(facility_unavailable)

EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
	GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
	GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
EXC_COMMON_BEGIN(facility_unavailable_common)
	GEN_COMMON facility_unavailable
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	facility_unavailable_exception
	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
	b	interrupt_return_srr


/**
 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
 * This is a synchronous interrupt in response to
 * executing an instruction without access to the facility that can only
 * be resolved in HV mode (e.g., HFSCR).
 * Similar to FP unavailable.
 */
INT_DEFINE_BEGIN(h_facility_unavailable)
	IVEC=0xf80
	IHSRR=1
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(h_facility_unavailable)

EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
	GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
	GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
EXC_COMMON_BEGIN(h_facility_unavailable_common)
	GEN_COMMON h_facility_unavailable
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	facility_unavailable_exception
	REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
	b	interrupt_return_hsrr


EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)

EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)

#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_system_error)
	IVEC=0x1200
	IHSRR=1
INT_DEFINE_END(cbe_system_error)

EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
	GEN_INT_ENTRY cbe_system_error, virt=0
EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
EXC_COMMON_BEGIN(cbe_system_error_common)
	GEN_COMMON cbe_system_error
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	cbe_system_error_exception
	b	interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif

/**
 * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
 * This has been removed from the ISA before 2.01, which is the earliest
 * 64-bit BookS ISA supported, however the G5 / 970 implements this
 * interrupt with a non-architected feature available through the support
 * processor interface.
 */
*/ INT_DEFINE_BEGIN(instruction_breakpoint) IVEC=0x1300 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif INT_DEFINE_END(instruction_breakpoint) EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100) GEN_INT_ENTRY instruction_breakpoint, virt=0 EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100) EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100) GEN_INT_ENTRY instruction_breakpoint, virt=1 EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100) EXC_COMMON_BEGIN(instruction_breakpoint_common) GEN_COMMON instruction_breakpoint addi r3,r1,STACK_FRAME_OVERHEAD bl instruction_breakpoint_exception b interrupt_return_srr EXC_REAL_NONE(0x1400, 0x100) EXC_VIRT_NONE(0x5400, 0x100) /** * Interrupt 0x1500 - Soft Patch Interrupt * * Handling: * This is an implementation specific interrupt which can be used for a * range of exceptions. * * This interrupt handler is unique in that it runs the denormal assist * code even for guests (and even in guest context) without going to KVM, * for speed. POWER9 does not raise denorm exceptions, so this special case * could be phased out in future to reduce special cases. */ INT_DEFINE_BEGIN(denorm_exception) IVEC=0x1500 IHSRR=1 IBRANCH_TO_COMMON=0 IKVM_REAL=1 INT_DEFINE_END(denorm_exception) EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100) GEN_INT_ENTRY denorm_exception, virt=0 #ifdef CONFIG_PPC_DENORMALISATION andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ bne+ denorm_assist #endif GEN_BRANCH_TO_COMMON denorm_exception, virt=0 EXC_REAL_END(denorm_exception, 0x1500, 0x100) #ifdef CONFIG_PPC_DENORMALISATION EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100) GEN_INT_ENTRY denorm_exception, virt=1 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */ bne+ denorm_assist GEN_BRANCH_TO_COMMON denorm_exception, virt=1 EXC_VIRT_END(denorm_exception, 0x5500, 0x100) #else EXC_VIRT_NONE(0x5500, 0x100) #endif #ifdef CONFIG_PPC_DENORMALISATION TRAMP_REAL_BEGIN(denorm_assist) BEGIN_FTR_SECTION /* * To denormalise we need to move a copy of the register to itself. * For POWER6 do that here for all FP regs. */ mfmsr r10 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1) xori r10,r10,(MSR_FE0|MSR_FE1) mtmsrd r10 sync .Lreg=0 .rept 32 fmr .Lreg,.Lreg .Lreg=.Lreg+1 .endr FTR_SECTION_ELSE /* * To denormalise we need to move a copy of the register to itself. * For POWER7 do that here for the first 32 VSX registers only. */ mfmsr r10 oris r10,r10,MSR_VSX@h mtmsrd r10 sync .Lreg=0 .rept 32 XVCPSGNDP(.Lreg,.Lreg,.Lreg) .Lreg=.Lreg+1 .endr ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) BEGIN_FTR_SECTION b denorm_done END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* * To denormalise we need to move a copy of the register to itself. * For POWER8 we need to do that for all 64 VSX registers */ .Lreg=32 .rept 32 XVCPSGNDP(.Lreg,.Lreg,.Lreg) .Lreg=.Lreg+1 .endr denorm_done: mfspr r11,SPRN_HSRR0 subi r11,r11,4 mtspr SPRN_HSRR0,r11 mtcrf 0x80,r9 ld r9,PACA_EXGEN+EX_R9(r13) BEGIN_FTR_SECTION ld r10,PACA_EXGEN+EX_PPR(r13) mtspr SPRN_PPR,r10 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) BEGIN_FTR_SECTION ld r10,PACA_EXGEN+EX_CFAR(r13) mtspr SPRN_CFAR,r10 END_FTR_SECTION_IFSET(CPU_FTR_CFAR) li r10,0 stb r10,PACAHSRR_VALID(r13) ld r10,PACA_EXGEN+EX_R10(r13) ld r11,PACA_EXGEN+EX_R11(r13) ld r12,PACA_EXGEN+EX_R12(r13) ld r13,PACA_EXGEN+EX_R13(r13) HRFI_TO_UNKNOWN b . 
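/*
 * How the assist works, in rough terms: a register-to-register self-move
 * (fmr for FP, XVCPSGNDP with identical operands for VSX) forces the core
 * to re-handle a denormal value sitting in that register, and winding
 * HSRR0 back by one instruction (the "subi r11,r11,4" at denorm_done)
 * makes HRFI_TO_UNKNOWN re-execute the faulting instruction, which then
 * completes normally. An illustrative C sketch of the flow (hypothetical
 * names, simplified from the three feature paths above):
 *
 *	static void denorm_assist_model(struct cpu_model *c)
 *	{
 *		int i, nregs = c->has_arch_207s ? 64 : 32;
 *
 *		for (i = 0; i < nregs; i++)
 *			self_move(c, i);	// renormalises register i
 *		c->hsrr0 -= 4;			// back up to faulting insn
 *		hrfid(c);			// retry it, denorm-free
 *	}
 */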
#endif EXC_COMMON_BEGIN(denorm_exception_common) GEN_COMMON denorm_exception addi r3,r1,STACK_FRAME_OVERHEAD bl unknown_exception b interrupt_return_hsrr #ifdef CONFIG_CBE_RAS INT_DEFINE_BEGIN(cbe_maintenance) IVEC=0x1600 IHSRR=1 INT_DEFINE_END(cbe_maintenance) EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) GEN_INT_ENTRY cbe_maintenance, virt=0 EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) EXC_VIRT_NONE(0x5600, 0x100) EXC_COMMON_BEGIN(cbe_maintenance_common) GEN_COMMON cbe_maintenance addi r3,r1,STACK_FRAME_OVERHEAD bl cbe_maintenance_exception b interrupt_return_hsrr #else /* CONFIG_CBE_RAS */ EXC_REAL_NONE(0x1600, 0x100) EXC_VIRT_NONE(0x5600, 0x100) #endif INT_DEFINE_BEGIN(altivec_assist) IVEC=0x1700 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE IKVM_REAL=1 #endif INT_DEFINE_END(altivec_assist) EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100) GEN_INT_ENTRY altivec_assist, virt=0 EXC_REAL_END(altivec_assist, 0x1700, 0x100) EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100) GEN_INT_ENTRY altivec_assist, virt=1 EXC_VIRT_END(altivec_assist, 0x5700, 0x100) EXC_COMMON_BEGIN(altivec_assist_common) GEN_COMMON altivec_assist addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_ALTIVEC bl altivec_assist_exception REST_NVGPRS(r1) /* instruction emulation may change GPRs */ #else bl unknown_exception #endif b interrupt_return_srr #ifdef CONFIG_CBE_RAS INT_DEFINE_BEGIN(cbe_thermal) IVEC=0x1800 IHSRR=1 INT_DEFINE_END(cbe_thermal) EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) GEN_INT_ENTRY cbe_thermal, virt=0 EXC_REAL_END(cbe_thermal, 0x1800, 0x100) EXC_VIRT_NONE(0x5800, 0x100) EXC_COMMON_BEGIN(cbe_thermal_common) GEN_COMMON cbe_thermal addi r3,r1,STACK_FRAME_OVERHEAD bl cbe_thermal_exception b interrupt_return_hsrr #else /* CONFIG_CBE_RAS */ EXC_REAL_NONE(0x1800, 0x100) EXC_VIRT_NONE(0x5800, 0x100) #endif #ifdef CONFIG_PPC_WATCHDOG INT_DEFINE_BEGIN(soft_nmi) IVEC=0x900 ISTACK=0 ICFAR=0 INT_DEFINE_END(soft_nmi) /* * Branch to soft_nmi_interrupt using the emergency stack. The emergency * stack is one that is usable by maskable interrupts so long as MSR_EE * remains off. It is used for recovery when something has corrupted the * normal kernel stack, for example. The "soft NMI" must not use the process * stack because we want irq disabled sections to avoid touching the stack * at all (other than PMU interrupts), so use the emergency stack for this, * and run it entirely with interrupts hard disabled. */ EXC_COMMON_BEGIN(soft_nmi_common) mr r10,r1 ld r1,PACAEMERGSP(r13) subi r1,r1,INT_FRAME_SIZE __GEN_COMMON_BODY soft_nmi addi r3,r1,STACK_FRAME_OVERHEAD bl soft_nmi_interrupt /* Clear MSR_RI before setting SRR0 and SRR1. */ li r9,0 mtmsrd r9,1 kuap_kernel_restore r9, r10 EXCEPTION_RESTORE_REGS hsrr=0 RFI_TO_KERNEL #endif /* CONFIG_PPC_WATCHDOG */ /* * An interrupt came in while soft-disabled. We set paca->irq_happened, then: * - If it was a decrementer interrupt, we bump the dec to max and return. * - If it was a doorbell we return immediately since doorbells are edge * triggered and won't automatically refire. * - If it was a HMI we return immediately since we handled it in realmode * and it won't refire. * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return. * This is called with r10 containing the value to OR to the paca field. */ .macro MASKED_INTERRUPT hsrr=0 .if \hsrr masked_Hinterrupt: .else masked_interrupt: .endif stw r9,PACA_EXGEN+EX_CCR(r13) #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG /* * Ensure there was no previous MUST_HARD_MASK interrupt or * HARD_DIS setting. 
If this does fire, the interrupt is still * masked and MSR[EE] will be cleared on return, so no need to * panic, but somebody probably enabled MSR[EE] under * PACA_IRQ_HARD_DIS, mtmsr(mfmsr() | MSR_x) being a common * cause. */ lbz r9,PACAIRQHAPPENED(r13) andi. r9,r9,(PACA_IRQ_MUST_HARD_MASK|PACA_IRQ_HARD_DIS) 0: tdnei r9,0 EMIT_WARN_ENTRY 0b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) #endif lbz r9,PACAIRQHAPPENED(r13) or r9,r9,r10 stb r9,PACAIRQHAPPENED(r13) .if ! \hsrr cmpwi r10,PACA_IRQ_DEC bne 1f LOAD_REG_IMMEDIATE(r9, 0x7fffffff) mtspr SPRN_DEC,r9 #ifdef CONFIG_PPC_WATCHDOG lwz r9,PACA_EXGEN+EX_CCR(r13) b soft_nmi_common #else b 2f #endif .endif 1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK beq 2f xori r12,r12,MSR_EE /* clear MSR_EE */ .if \hsrr mtspr SPRN_HSRR1,r12 .else mtspr SPRN_SRR1,r12 .endif ori r9,r9,PACA_IRQ_HARD_DIS stb r9,PACAIRQHAPPENED(r13) 2: /* done */ li r9,0 .if \hsrr stb r9,PACAHSRR_VALID(r13) .else stb r9,PACASRR_VALID(r13) .endif SEARCH_RESTART_TABLE cmpdi r12,0 beq 3f .if \hsrr mtspr SPRN_HSRR0,r12 .else mtspr SPRN_SRR0,r12 .endif 3: ld r9,PACA_EXGEN+EX_CTR(r13) mtctr r9 lwz r9,PACA_EXGEN+EX_CCR(r13) mtcrf 0x80,r9 std r1,PACAR1(r13) ld r9,PACA_EXGEN+EX_R9(r13) ld r10,PACA_EXGEN+EX_R10(r13) ld r11,PACA_EXGEN+EX_R11(r13) ld r12,PACA_EXGEN+EX_R12(r13) ld r13,PACA_EXGEN+EX_R13(r13) /* May return to masked low address where r13 is not set up */ .if \hsrr HRFI_TO_KERNEL .else RFI_TO_KERNEL .endif b . .endm TRAMP_REAL_BEGIN(stf_barrier_fallback) std r9,PACA_EXRFI+EX_R9(r13) std r10,PACA_EXRFI+EX_R10(r13) sync ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) ori 31,31,0 .rept 14 b 1f 1: .endr blr /* Clobbers r10, r11, ctr */ .macro L1D_DISPLACEMENT_FLUSH ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) ld r11,PACA_L1D_FLUSH_SIZE(r13) srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ mtctr r11 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ /* order ld/st prior to dcbt stop all streams with flushing */ sync /* * The load addresses are at staggered offsets within cachelines, * which suits some pipelines better (on others it should not * hurt). */ 1: ld r11,(0x80 + 8)*0(r10) ld r11,(0x80 + 8)*1(r10) ld r11,(0x80 + 8)*2(r10) ld r11,(0x80 + 8)*3(r10) ld r11,(0x80 + 8)*4(r10) ld r11,(0x80 + 8)*5(r10) ld r11,(0x80 + 8)*6(r10) ld r11,(0x80 + 8)*7(r10) addi r10,r10,0x80*8 bdnz 1b .endm TRAMP_REAL_BEGIN(entry_flush_fallback) std r9,PACA_EXRFI+EX_R9(r13) std r10,PACA_EXRFI+EX_R10(r13) std r11,PACA_EXRFI+EX_R11(r13) mfctr r9 L1D_DISPLACEMENT_FLUSH mtctr r9 ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) ld r11,PACA_EXRFI+EX_R11(r13) blr /* * The SCV entry flush happens with interrupts enabled, so it must disable * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10 * (containing LR) does not need to be preserved here because scv entry * puts 0 in the pt_regs, CTR can be clobbered for the same reason. 
*/ TRAMP_REAL_BEGIN(scv_entry_flush_fallback) li r10,0 mtmsrd r10,1 lbz r10,PACAIRQHAPPENED(r13) ori r10,r10,PACA_IRQ_HARD_DIS stb r10,PACAIRQHAPPENED(r13) std r11,PACA_EXRFI+EX_R11(r13) L1D_DISPLACEMENT_FLUSH ld r11,PACA_EXRFI+EX_R11(r13) li r10,MSR_RI mtmsrd r10,1 blr TRAMP_REAL_BEGIN(rfi_flush_fallback) SET_SCRATCH0(r13); GET_PACA(r13); std r1,PACA_EXRFI+EX_R12(r13) ld r1,PACAKSAVE(r13) std r9,PACA_EXRFI+EX_R9(r13) std r10,PACA_EXRFI+EX_R10(r13) std r11,PACA_EXRFI+EX_R11(r13) mfctr r9 L1D_DISPLACEMENT_FLUSH mtctr r9 ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) ld r11,PACA_EXRFI+EX_R11(r13) ld r1,PACA_EXRFI+EX_R12(r13) GET_SCRATCH0(r13); rfid TRAMP_REAL_BEGIN(hrfi_flush_fallback) SET_SCRATCH0(r13); GET_PACA(r13); std r1,PACA_EXRFI+EX_R12(r13) ld r1,PACAKSAVE(r13) std r9,PACA_EXRFI+EX_R9(r13) std r10,PACA_EXRFI+EX_R10(r13) std r11,PACA_EXRFI+EX_R11(r13) mfctr r9 L1D_DISPLACEMENT_FLUSH mtctr r9 ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) ld r11,PACA_EXRFI+EX_R11(r13) ld r1,PACA_EXRFI+EX_R12(r13) GET_SCRATCH0(r13); hrfid TRAMP_REAL_BEGIN(rfscv_flush_fallback) /* system call volatile */ mr r7,r13 GET_PACA(r13); mr r8,r1 ld r1,PACAKSAVE(r13) mfctr r9 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) ld r11,PACA_L1D_FLUSH_SIZE(r13) srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ mtctr r11 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ /* order ld/st prior to dcbt stop all streams with flushing */ sync /* * The load addresses are at staggered offsets within cachelines, * which suits some pipelines better (on others it should not * hurt). */ 1: ld r11,(0x80 + 8)*0(r10) ld r11,(0x80 + 8)*1(r10) ld r11,(0x80 + 8)*2(r10) ld r11,(0x80 + 8)*3(r10) ld r11,(0x80 + 8)*4(r10) ld r11,(0x80 + 8)*5(r10) ld r11,(0x80 + 8)*6(r10) ld r11,(0x80 + 8)*7(r10) addi r10,r10,0x80*8 bdnz 1b mtctr r9 li r9,0 li r10,0 li r11,0 mr r1,r8 mr r13,r7 RFSCV USE_TEXT_SECTION() #ifdef CONFIG_KVM_BOOK3S_64_HANDLER kvm_interrupt: /* * The conditional branch in KVMTEST can't reach all the way, * make a stub. */ b kvmppc_interrupt #endif _GLOBAL(do_uaccess_flush) UACCESS_FLUSH_FIXUP_SECTION nop nop nop blr L1D_DISPLACEMENT_FLUSH blr _ASM_NOKPROBE_SYMBOL(do_uaccess_flush) EXPORT_SYMBOL(do_uaccess_flush) MASKED_INTERRUPT MASKED_INTERRUPT hsrr=1 USE_FIXED_SECTION(virt_trampolines) /* * All code below __end_soft_masked is treated as soft-masked. If * any code runs here with MSR[EE]=1, it must then cope with pending * soft interrupt being raised (i.e., by ensuring it is replayed). * * The __end_interrupts marker must be past the out-of-line (OOL) * handlers, so that they are copied to real address 0x100 when running * a relocatable kernel. This ensures they can be reached from the short * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch * directly, without using LOAD_HANDLER().
*/ .align 7 .globl __end_interrupts __end_interrupts: DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines) CLOSE_FIXED_SECTION(real_vectors); CLOSE_FIXED_SECTION(real_trampolines); CLOSE_FIXED_SECTION(virt_vectors); CLOSE_FIXED_SECTION(virt_trampolines); USE_TEXT_SECTION() /* MSR[RI] should be clear because this uses SRR[01] */ _GLOBAL(enable_machine_check) mflr r0 bcl 20,31,$+4 0: mflr r3 addi r3,r3,(1f - 0b) mtspr SPRN_SRR0,r3 mfmsr r3 ori r3,r3,MSR_ME mtspr SPRN_SRR1,r3 RFI_TO_KERNEL 1: mtlr r0 blr /* MSR[RI] should be clear because this uses SRR[01] */ disable_machine_check: mflr r0 bcl 20,31,$+4 0: mflr r3 addi r3,r3,(1f - 0b) mtspr SPRN_SRR0,r3 mfmsr r3 li r4,MSR_ME andc r3,r3,r4 mtspr SPRN_SRR1,r3 RFI_TO_KERNEL 1: mtlr r0 blr
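/*
 * Note on the two helpers above: mtmsrd cannot flip MSR[ME], so
 * machine-check enable is toggled by loading SRR0 with the local "1:"
 * label and SRR1 with the modified MSR, then executing rfid, which
 * installs NIP and MSR atomically (hence the requirement that MSR[RI]
 * be clear while SRR0/SRR1 are live). An illustrative C sketch
 * (hypothetical names):
 *
 *	static void set_msr_me_model(struct cpu_model *c, bool enable)
 *	{
 *		c->srr0 = c->return_label;	// the "1:" past rfid
 *		c->srr1 = enable ? (c->msr | MSR_ME) : (c->msr & ~MSR_ME);
 *		rfid(c);			// NIP = SRR0, MSR = SRR1
 *	}
 */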
aixcc-public/challenge-001-exemplar-source
2,994
arch/powerpc/kernel/optprobes_head.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Code to prepare detour buffer for optprobes in Kernel. * * Copyright 2017, Anju T, IBM Corp. */ #include <asm/ppc_asm.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> #ifdef CONFIG_PPC64 #define SAVE_30GPRS(base) SAVE_GPRS(2, 31, base) #define REST_30GPRS(base) REST_GPRS(2, 31, base) #define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop; nop; nop #else #define SAVE_30GPRS(base) stmw r2, GPR2(base) #define REST_30GPRS(base) lmw r2, GPR2(base) #define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop #endif #define OPT_SLOT_SIZE 65536 .balign 4 /* * Reserve an area to allocate slots for detour buffer. * This is part of .text section (rather than vmalloc area) * as this needs to be within 32MB of the probed address. */ .global optinsn_slot optinsn_slot: .space OPT_SLOT_SIZE /* * Optprobe template: * This template gets copied into one of the slots in optinsn_slot * and gets fixed up with real optprobe structures et al. */ .global optprobe_template_entry optprobe_template_entry: /* Create an in-memory pt_regs */ PPC_STLU r1,-INT_FRAME_SIZE(r1) SAVE_GPR(0,r1) /* Save the previous SP into stack */ addi r0,r1,INT_FRAME_SIZE PPC_STL r0,GPR1(r1) SAVE_30GPRS(r1) /* Save SPRS */ mfmsr r5 PPC_STL r5,_MSR(r1) li r5,0x700 PPC_STL r5,_TRAP(r1) li r5,0 PPC_STL r5,ORIG_GPR3(r1) PPC_STL r5,RESULT(r1) mfctr r5 PPC_STL r5,_CTR(r1) mflr r5 PPC_STL r5,_LINK(r1) mfspr r5,SPRN_XER PPC_STL r5,_XER(r1) mfcr r5 PPC_STL r5,_CCR(r1) #ifdef CONFIG_PPC64 lbz r5,PACAIRQSOFTMASK(r13) std r5,SOFTE(r1) #endif /* * We may get here from a module, so load the kernel TOC in r2. * The original TOC gets restored when pt_regs is restored * further below. */ #ifdef CONFIG_PPC64 LOAD_PACA_TOC() #endif .global optprobe_template_op_address optprobe_template_op_address: /* * Parameters to optimized_callback(): * 1. optimized_kprobe structure in r3 */ TEMPLATE_FOR_IMM_LOAD_INSNS /* 2. pt_regs pointer in r4 */ addi r4,r1,STACK_FRAME_OVERHEAD .global optprobe_template_call_handler optprobe_template_call_handler: /* Branch to optimized_callback() */ nop /* * Parameters for instruction emulation: * 1. Pass SP in register r3. */ addi r3,r1,STACK_FRAME_OVERHEAD .global optprobe_template_insn optprobe_template_insn: /* 2, Pass instruction to be emulated in r4 */ TEMPLATE_FOR_IMM_LOAD_INSNS .global optprobe_template_call_emulate optprobe_template_call_emulate: /* Branch to emulate_step() */ nop /* * All done. * Now, restore the registers... */ PPC_LL r5,_MSR(r1) mtmsr r5 PPC_LL r5,_CTR(r1) mtctr r5 PPC_LL r5,_LINK(r1) mtlr r5 PPC_LL r5,_XER(r1) mtxer r5 PPC_LL r5,_CCR(r1) mtcr r5 REST_GPR(0,r1) REST_30GPRS(r1) /* Restore the previous SP */ addi r1,r1,INT_FRAME_SIZE .global optprobe_template_ret optprobe_template_ret: /* ... and jump back from trampoline */ nop .global optprobe_template_end optprobe_template_end:
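/*
 * Big picture: at arm time the optprobe code copies this template into a
 * slot inside optinsn_slot and patches it in place - the
 * TEMPLATE_FOR_IMM_LOAD_INSNS nops become immediate-load sequences for
 * the optimized_kprobe pointer and the probed instruction, and the two
 * "call" nops become branches. An illustrative C sketch of what the
 * patched detour ends up doing (hypothetical names, not the exact
 * kernel API):
 *
 *	static void detour_model(struct optimized_kprobe *op,
 *				 struct pt_regs *regs)
 *	{
 *		optimized_callback(op, regs);	// run the probe handler
 *		emulate_insn(regs, op->insn);	// step over probed insn
 *	}					// then branch back inline
 */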
aixcc-public/challenge-001-exemplar-source
43,079
arch/powerpc/kernel/exceptions-64e.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Boot code and exception vectors for Book3E processors * * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp. */ #include <linux/threads.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/cputable.h> #include <asm/setup.h> #include <asm/thread_info.h> #include <asm/reg_a2.h> #include <asm/exception-64e.h> #include <asm/bug.h> #include <asm/irqflags.h> #include <asm/ptrace.h> #include <asm/ppc-opcode.h> #include <asm/mmu.h> #include <asm/hw_irq.h> #include <asm/kvm_asm.h> #include <asm/kvm_booke_hv_asm.h> #include <asm/feature-fixups.h> #include <asm/context_tracking.h> /* 64e interrupt returns always use SRR registers */ #define fast_interrupt_return fast_interrupt_return_srr #define interrupt_return interrupt_return_srr /* XXX This will ultimately add space for a special exception save * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... * when taking special interrupts. For now we don't support that, * special interrupts from within a non-standard level will probably * blow you up */ #define SPECIAL_EXC_SRR0 0 #define SPECIAL_EXC_SRR1 1 #define SPECIAL_EXC_SPRG_GEN 2 #define SPECIAL_EXC_SPRG_TLB 3 #define SPECIAL_EXC_MAS0 4 #define SPECIAL_EXC_MAS1 5 #define SPECIAL_EXC_MAS2 6 #define SPECIAL_EXC_MAS3 7 #define SPECIAL_EXC_MAS6 8 #define SPECIAL_EXC_MAS7 9 #define SPECIAL_EXC_MAS5 10 /* E.HV only */ #define SPECIAL_EXC_MAS8 11 /* E.HV only */ #define SPECIAL_EXC_IRQHAPPENED 12 #define SPECIAL_EXC_DEAR 13 #define SPECIAL_EXC_ESR 14 #define SPECIAL_EXC_SOFTE 15 #define SPECIAL_EXC_CSRR0 16 #define SPECIAL_EXC_CSRR1 17 /* must be even to keep 16-byte stack alignment */ #define SPECIAL_EXC_END 18 #define SPECIAL_EXC_FRAME_SIZE (INT_FRAME_SIZE + SPECIAL_EXC_END * 8) #define SPECIAL_EXC_FRAME_OFFS (INT_FRAME_SIZE - 288) #define SPECIAL_EXC_STORE(reg, name) \ std reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1) #define SPECIAL_EXC_LOAD(reg, name) \ ld reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1) special_reg_save: /* * We only need (or have stack space) to save this stuff if * we interrupted the kernel. */ ld r3,_MSR(r1) andi. r3,r3,MSR_PR bnelr /* * Advance to the next TLB exception frame for handler * types that don't do it automatically. 
*/ LOAD_REG_ADDR(r11,extlb_level_exc) lwz r12,0(r11) mfspr r10,SPRN_SPRG_TLB_EXFRAME add r10,r10,r12 mtspr SPRN_SPRG_TLB_EXFRAME,r10 /* * Save registers needed to allow nesting of certain exceptions * (such as TLB misses) inside special exception levels */ mfspr r10,SPRN_SRR0 SPECIAL_EXC_STORE(r10,SRR0) mfspr r10,SPRN_SRR1 SPECIAL_EXC_STORE(r10,SRR1) mfspr r10,SPRN_SPRG_GEN_SCRATCH SPECIAL_EXC_STORE(r10,SPRG_GEN) mfspr r10,SPRN_SPRG_TLB_SCRATCH SPECIAL_EXC_STORE(r10,SPRG_TLB) mfspr r10,SPRN_MAS0 SPECIAL_EXC_STORE(r10,MAS0) mfspr r10,SPRN_MAS1 SPECIAL_EXC_STORE(r10,MAS1) mfspr r10,SPRN_MAS2 SPECIAL_EXC_STORE(r10,MAS2) mfspr r10,SPRN_MAS3 SPECIAL_EXC_STORE(r10,MAS3) mfspr r10,SPRN_MAS6 SPECIAL_EXC_STORE(r10,MAS6) mfspr r10,SPRN_MAS7 SPECIAL_EXC_STORE(r10,MAS7) BEGIN_FTR_SECTION mfspr r10,SPRN_MAS5 SPECIAL_EXC_STORE(r10,MAS5) mfspr r10,SPRN_MAS8 SPECIAL_EXC_STORE(r10,MAS8) /* MAS5/8 could have inappropriate values if we interrupted KVM code */ li r10,0 mtspr SPRN_MAS5,r10 mtspr SPRN_MAS8,r10 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) mfspr r10,SPRN_DEAR SPECIAL_EXC_STORE(r10,DEAR) mfspr r10,SPRN_ESR SPECIAL_EXC_STORE(r10,ESR) ld r10,_NIP(r1) SPECIAL_EXC_STORE(r10,CSRR0) ld r10,_MSR(r1) SPECIAL_EXC_STORE(r10,CSRR1) blr ret_from_level_except: ld r3,_MSR(r1) andi. r3,r3,MSR_PR beq 1f REST_NVGPRS(r1) b interrupt_return 1: LOAD_REG_ADDR(r11,extlb_level_exc) lwz r12,0(r11) mfspr r10,SPRN_SPRG_TLB_EXFRAME sub r10,r10,r12 mtspr SPRN_SPRG_TLB_EXFRAME,r10 /* * It's possible that the special level exception interrupted a * TLB miss handler, and inserted the same entry that the * interrupted handler was about to insert. On CPUs without TLB * write conditional, this can result in a duplicate TLB entry. * Wipe all non-bolted entries to be safe. * * Note that this doesn't protect against any TLB misses * we may take accessing the stack from here to the end of * the special level exception. It's not clear how we can * reasonably protect against that, but only CPUs with * neither TLB write conditional nor bolted kernel memory * are affected. Do any such CPUs even exist? */ PPC_TLBILX_ALL(0,R0) REST_NVGPRS(r1) SPECIAL_EXC_LOAD(r10,SRR0) mtspr SPRN_SRR0,r10 SPECIAL_EXC_LOAD(r10,SRR1) mtspr SPRN_SRR1,r10 SPECIAL_EXC_LOAD(r10,SPRG_GEN) mtspr SPRN_SPRG_GEN_SCRATCH,r10 SPECIAL_EXC_LOAD(r10,SPRG_TLB) mtspr SPRN_SPRG_TLB_SCRATCH,r10 SPECIAL_EXC_LOAD(r10,MAS0) mtspr SPRN_MAS0,r10 SPECIAL_EXC_LOAD(r10,MAS1) mtspr SPRN_MAS1,r10 SPECIAL_EXC_LOAD(r10,MAS2) mtspr SPRN_MAS2,r10 SPECIAL_EXC_LOAD(r10,MAS3) mtspr SPRN_MAS3,r10 SPECIAL_EXC_LOAD(r10,MAS6) mtspr SPRN_MAS6,r10 SPECIAL_EXC_LOAD(r10,MAS7) mtspr SPRN_MAS7,r10 BEGIN_FTR_SECTION SPECIAL_EXC_LOAD(r10,MAS5) mtspr SPRN_MAS5,r10 SPECIAL_EXC_LOAD(r10,MAS8) mtspr SPRN_MAS8,r10 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) SPECIAL_EXC_LOAD(r10,DEAR) mtspr SPRN_DEAR,r10 SPECIAL_EXC_LOAD(r10,ESR) mtspr SPRN_ESR,r10 stdcx. 
r0,0,r1 /* to clear the reservation */ REST_GPRS(2, 9, r1) ld r10,_CTR(r1) ld r11,_XER(r1) mtctr r10 mtxer r11 blr .macro ret_from_level srr0 srr1 paca_ex scratch bl ret_from_level_except ld r10,_LINK(r1) ld r11,_CCR(r1) ld r0,GPR13(r1) mtlr r10 mtcr r11 REST_GPRS(10, 12, r1) mtspr \scratch,r0 std r10,\paca_ex+EX_R10(r13); std r11,\paca_ex+EX_R11(r13); ld r10,_NIP(r1) ld r11,_MSR(r1) REST_GPR(0, r1) REST_GPR(1, r1) mtspr \srr0,r10 mtspr \srr1,r11 ld r10,\paca_ex+EX_R10(r13) ld r11,\paca_ex+EX_R11(r13) mfspr r13,\scratch .endm ret_from_crit_except: ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH rfci ret_from_mc_except: ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH rfmci /* Exception prolog code for all exceptions */ #define EXCEPTION_PROLOG(n, intnum, type, addition) \ mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \ mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ std r10,PACA_EX##type+EX_R10(r13); \ std r11,PACA_EX##type+EX_R11(r13); \ mfcr r10; /* save CR */ \ mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \ DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \ stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \ addition; /* additional code for that exc. */ \ std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \ type##_SET_KSTACK; /* get special stack if necessary */\ andi. r10,r11,MSR_PR; /* save stack pointer */ \ beq 1f; /* branch around if supervisor */ \ ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\ 1: type##_BTB_FLUSH \ cmpdi cr1,r1,0; /* check if SP makes sense */ \ bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \ mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */ /* Exception type-specific macros */ #define GEN_SET_KSTACK \ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ #define SPRN_GEN_SRR0 SPRN_SRR0 #define SPRN_GEN_SRR1 SPRN_SRR1 #define GDBELL_SET_KSTACK GEN_SET_KSTACK #define SPRN_GDBELL_SRR0 SPRN_GSRR0 #define SPRN_GDBELL_SRR1 SPRN_GSRR1 #define CRIT_SET_KSTACK \ ld r1,PACA_CRIT_STACK(r13); \ subi r1,r1,SPECIAL_EXC_FRAME_SIZE #define SPRN_CRIT_SRR0 SPRN_CSRR0 #define SPRN_CRIT_SRR1 SPRN_CSRR1 #define DBG_SET_KSTACK \ ld r1,PACA_DBG_STACK(r13); \ subi r1,r1,SPECIAL_EXC_FRAME_SIZE #define SPRN_DBG_SRR0 SPRN_DSRR0 #define SPRN_DBG_SRR1 SPRN_DSRR1 #define MC_SET_KSTACK \ ld r1,PACA_MC_STACK(r13); \ subi r1,r1,SPECIAL_EXC_FRAME_SIZE #define SPRN_MC_SRR0 SPRN_MCSRR0 #define SPRN_MC_SRR1 SPRN_MCSRR1 #define GEN_BTB_FLUSH \ START_BTB_FLUSH_SECTION \ beq 1f; \ BTB_FLUSH(r10) \ 1: \ END_BTB_FLUSH_SECTION #define CRIT_BTB_FLUSH \ START_BTB_FLUSH_SECTION \ BTB_FLUSH(r10) \ END_BTB_FLUSH_SECTION #define DBG_BTB_FLUSH CRIT_BTB_FLUSH #define MC_BTB_FLUSH CRIT_BTB_FLUSH #define GDBELL_BTB_FLUSH GEN_BTB_FLUSH #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n)) #define CRIT_EXCEPTION_PROLOG(n, intnum, addition) \ EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n)) #define DBG_EXCEPTION_PROLOG(n, intnum, addition) \ EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n)) #define MC_EXCEPTION_PROLOG(n, intnum, addition) \ EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n)) #define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \ EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n)) /* Variants of the "addition" argument for the prolog */ #define PROLOG_ADDITION_NONE_GEN(n) #define PROLOG_ADDITION_NONE_GDBELL(n) #define PROLOG_ADDITION_NONE_CRIT(n) #define PROLOG_ADDITION_NONE_DBG(n) 
#define PROLOG_ADDITION_NONE_MC(n) #define PROLOG_ADDITION_MASKABLE_GEN(n) \ lbz r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */ \ andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \ bne masked_interrupt_book3e_##n /* * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is * called, because that does SAVE_NVGPRS which must see the original register * values, otherwise the scratch values might be restored when exiting the * interrupt. */ #define PROLOG_ADDITION_2REGS_GEN(n) \ std r14,PACA_EXGEN+EX_R14(r13); \ std r15,PACA_EXGEN+EX_R15(r13) #define PROLOG_ADDITION_1REG_GEN(n) \ std r14,PACA_EXGEN+EX_R14(r13); #define PROLOG_ADDITION_2REGS_CRIT(n) \ std r14,PACA_EXCRIT+EX_R14(r13); \ std r15,PACA_EXCRIT+EX_R15(r13) #define PROLOG_ADDITION_2REGS_DBG(n) \ std r14,PACA_EXDBG+EX_R14(r13); \ std r15,PACA_EXDBG+EX_R15(r13) #define PROLOG_ADDITION_2REGS_MC(n) \ std r14,PACA_EXMC+EX_R14(r13); \ std r15,PACA_EXMC+EX_R15(r13) /* Core exception code for all exceptions except TLB misses. */ #define EXCEPTION_COMMON_LVL(n, scratch, excf) \ exc_##n##_common: \ SAVE_GPR(0, r1); /* save r0 in stackframe */ \ SAVE_GPRS(2, 9, r1); /* save r2 - r9 in stackframe */ \ std r10,_NIP(r1); /* save SRR0 to stackframe */ \ std r11,_MSR(r1); /* save SRR1 to stackframe */ \ beq 2f; /* if from kernel mode */ \ 2: ld r3,excf+EX_R10(r13); /* get back r10 */ \ ld r4,excf+EX_R11(r13); /* get back r11 */ \ mfspr r5,scratch; /* get back r13 */ \ SAVE_GPR(12, r1); /* save r12 in stackframe */ \ LOAD_PACA_TOC(); /* get kernel TOC into r2 */ \ mflr r6; /* save LR in stackframe */ \ mfctr r7; /* save CTR in stackframe */ \ mfspr r8,SPRN_XER; /* save XER in stackframe */ \ ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \ lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \ lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \ LOAD_REG_IMMEDIATE(r12, STACK_FRAME_REGS_MARKER); \ ZEROIZE_GPR(0); \ std r3,GPR10(r1); /* save r10 to stackframe */ \ std r4,GPR11(r1); /* save r11 to stackframe */ \ std r5,GPR13(r1); /* save it to stackframe */ \ std r6,_LINK(r1); \ std r7,_CTR(r1); \ std r8,_XER(r1); \ li r3,(n); /* regs.trap vector */ \ std r9,0(r1); /* store stack frame back link */ \ std r10,_CCR(r1); /* store orig CR in stackframe */ \ std r9,GPR1(r1); /* store stack frame back link */ \ std r11,SOFTE(r1); /* and save it to stackframe */ \ std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ std r3,_TRAP(r1); /* set trap number */ \ std r0,RESULT(r1); /* clear regs->result */ \ SAVE_NVGPRS(r1); #define EXCEPTION_COMMON(n) \ EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN) #define EXCEPTION_COMMON_CRIT(n) \ EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT) #define EXCEPTION_COMMON_MC(n) \ EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC) #define EXCEPTION_COMMON_DBG(n) \ EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG) /* XXX FIXME: Restore r14/r15 when necessary */ #define BAD_STACK_TRAMPOLINE(n) \ exc_##n##_bad_stack: \ li r1,(n); /* get exception number */ \ sth r1,PACA_TRAP_SAVE(r13); /* store trap */ \ b bad_stack_book3e; /* bad stack error */ /* WARNING: If you change the layout of this stub, make sure you check * the debug exception handler which handles single stepping * into exceptions from userspace, and the MM code in * arch/powerpc/mm/tlb_nohash.c which patches the branch here * and would need to be updated if that branch is moved */ #define EXCEPTION_STUB(loc, label) \ . 
= interrupt_base_book3e + loc; \ nop; /* To make debug interrupts happy */ \ b exc_##label##_book3e; #define ACK_NONE(r) #define ACK_DEC(r) \ lis r,TSR_DIS@h; \ mtspr SPRN_TSR,r #define ACK_FIT(r) \ lis r,TSR_FIS@h; \ mtspr SPRN_TSR,r /* Used by asynchronous interrupts that may happen in the idle loop. * * This checks whether the thread was in the idle loop, and if so, returns * to the caller rather than to the saved PC. This is to avoid a race if * interrupts happen before the wait instruction. */ #define CHECK_NAPPING() \ ld r11, PACA_THREAD_INFO(r13); \ ld r10,TI_LOCAL_FLAGS(r11); \ andi. r9,r10,_TLF_NAPPING; \ beq+ 1f; \ ld r8,_LINK(r1); \ rlwinm r7,r10,0,~_TLF_NAPPING; \ std r8,_NIP(r1); \ std r7,TI_LOCAL_FLAGS(r11); \ 1: #define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack) \ START_EXCEPTION(label); \ NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\ EXCEPTION_COMMON(trapnum) \ ack(r8); \ CHECK_NAPPING(); \ addi r3,r1,STACK_FRAME_OVERHEAD; \ bl hdlr; \ b interrupt_return /* * And here we have the exception vectors ! */ .text .balign 0x1000 .globl interrupt_base_book3e interrupt_base_book3e: /* fake trap */ EXCEPTION_STUB(0x000, machine_check) EXCEPTION_STUB(0x020, critical_input) /* 0x0100 */ EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */ EXCEPTION_STUB(0x060, data_storage) /* 0x0300 */ EXCEPTION_STUB(0x080, instruction_storage) /* 0x0400 */ EXCEPTION_STUB(0x0a0, external_input) /* 0x0500 */ EXCEPTION_STUB(0x0c0, alignment) /* 0x0600 */ EXCEPTION_STUB(0x0e0, program) /* 0x0700 */ EXCEPTION_STUB(0x100, fp_unavailable) /* 0x0800 */ EXCEPTION_STUB(0x120, system_call) /* 0x0c00 */ EXCEPTION_STUB(0x140, ap_unavailable) /* 0x0f20 */ EXCEPTION_STUB(0x160, decrementer) /* 0x0900 */ EXCEPTION_STUB(0x180, fixed_interval) /* 0x0980 */ EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ EXCEPTION_STUB(0x1c0, data_tlb_miss) EXCEPTION_STUB(0x1e0, instruction_tlb_miss) EXCEPTION_STUB(0x200, altivec_unavailable) EXCEPTION_STUB(0x220, altivec_assist) EXCEPTION_STUB(0x260, perfmon) EXCEPTION_STUB(0x280, doorbell) EXCEPTION_STUB(0x2a0, doorbell_crit) EXCEPTION_STUB(0x2c0, guest_doorbell) EXCEPTION_STUB(0x2e0, guest_doorbell_crit) EXCEPTION_STUB(0x300, hypercall) EXCEPTION_STUB(0x320, ehpriv) EXCEPTION_STUB(0x340, lrat_error) .globl __end_interrupts __end_interrupts: /* Critical Input Interrupt */ START_EXCEPTION(critical_input); CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x100) bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD bl unknown_nmi_exception b ret_from_crit_except /* Machine Check Interrupt */ START_EXCEPTION(machine_check); MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_MC(0x000) bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD bl machine_check_exception b ret_from_mc_except /* Data Storage Interrupt */ START_EXCEPTION(data_storage) NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE, PROLOG_ADDITION_2REGS) mfspr r14,SPRN_DEAR mfspr r15,SPRN_ESR std r14,_DEAR(r1) std r15,_ESR(r1) ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) EXCEPTION_COMMON(0x300) b storage_fault_common /* Instruction Storage Interrupt */ START_EXCEPTION(instruction_storage); NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE, PROLOG_ADDITION_2REGS) li r15,0 mr r14,r10 std r14,_DEAR(r1) std r15,_ESR(r1) ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) EXCEPTION_COMMON(0x400) b storage_fault_common /* External Input Interrupt */
MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, external_input, do_IRQ, ACK_NONE) /* Alignment */ START_EXCEPTION(alignment); NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT, PROLOG_ADDITION_2REGS) mfspr r14,SPRN_DEAR mfspr r15,SPRN_ESR std r14,_DEAR(r1) std r15,_ESR(r1) ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) EXCEPTION_COMMON(0x600) b alignment_more /* no room, go out of line */ /* Program Interrupt */ START_EXCEPTION(program); NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM, PROLOG_ADDITION_1REG) mfspr r14,SPRN_ESR std r14,_ESR(r1) ld r14,PACA_EXGEN+EX_R14(r13) EXCEPTION_COMMON(0x700) addi r3,r1,STACK_FRAME_OVERHEAD bl program_check_exception REST_NVGPRS(r1) b interrupt_return /* Floating Point Unavailable Interrupt */ START_EXCEPTION(fp_unavailable); NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL, PROLOG_ADDITION_NONE) /* we can probably do a shorter exception entry for that one... */ EXCEPTION_COMMON(0x800) ld r12,_MSR(r1) andi. r0,r12,MSR_PR; beq- 1f bl load_up_fpu b fast_interrupt_return 1: addi r3,r1,STACK_FRAME_OVERHEAD bl kernel_fp_unavailable_exception b interrupt_return /* Altivec Unavailable Interrupt */ START_EXCEPTION(altivec_unavailable); NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, PROLOG_ADDITION_NONE) /* we can probably do a shorter exception entry for that one... */ EXCEPTION_COMMON(0x200) #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION ld r12,_MSR(r1) andi. r0,r12,MSR_PR; beq- 1f bl load_up_altivec b fast_interrupt_return 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif addi r3,r1,STACK_FRAME_OVERHEAD bl altivec_unavailable_exception b interrupt_return /* AltiVec Assist */ START_EXCEPTION(altivec_assist); NORMAL_EXCEPTION_PROLOG(0x220, BOOKE_INTERRUPT_ALTIVEC_ASSIST, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x220) addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION bl altivec_assist_exception END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) REST_NVGPRS(r1) #else bl unknown_exception #endif b interrupt_return /* Decrementer Interrupt */ MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, decrementer, timer_interrupt, ACK_DEC) /* Fixed Interval Timer Interrupt */ MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, fixed_interval, unknown_exception, ACK_FIT) /* Watchdog Timer Interrupt */ START_EXCEPTION(watchdog); CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x9f0) bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_BOOKE_WDT bl WatchdogException #else bl unknown_nmi_exception #endif b ret_from_crit_except /* System Call Interrupt */ START_EXCEPTION(system_call) mr r9,r13 /* keep a copy of userland r13 */ mfspr r11,SPRN_SRR0 /* get return address */ mfspr r12,SPRN_SRR1 /* get previous MSR */ mfspr r13,SPRN_SPRG_PACA /* get our PACA */ b system_call_common /* Auxiliary Processor Unavailable Interrupt */ START_EXCEPTION(ap_unavailable); NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20) addi r3,r1,STACK_FRAME_OVERHEAD bl unknown_exception b interrupt_return /* Debug exception as a critical interrupt*/ START_EXCEPTION(debug_crit); CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG, PROLOG_ADDITION_2REGS) /* * If there is a single step or branch-taken exception in an * exception entry sequence, it was probably meant to apply to * the code where the exception occurred (since exception entry * doesn't turn off DE automatically). 
We simulate the effect * of turning off DE on entry to an exception handler by turning * off DE in the CSRR1 value and clearing the debug status. */ mfspr r14,SPRN_DBSR /* check single-step/branch taken */ andis. r15,r14,(DBSR_IC|DBSR_BT)@h beq+ 1f #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r15) LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e) LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts) cmpld cr0,r10,r14 cmpld cr1,r10,r15 #else LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e) cmpld cr0, r10, r14 LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts) cmpld cr1, r10, r14 #endif blt+ cr0,1f bge+ cr1,1f /* here it looks like we got an inappropriate debug exception. */ lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ mtspr SPRN_DBSR,r14 mtspr SPRN_CSRR1,r11 lwz r10,PACA_EXCRIT+EX_CR(r13) /* restore registers */ ld r1,PACA_EXCRIT+EX_R1(r13) ld r14,PACA_EXCRIT+EX_R14(r13) ld r15,PACA_EXCRIT+EX_R15(r13) mtcr r10 ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */ ld r11,PACA_EXCRIT+EX_R11(r13) mfspr r13,SPRN_SPRG_CRIT_SCRATCH rfci /* Normal debug exception */ /* XXX We only handle coming from userspace for now since we can't * quite save properly an interrupted kernel state yet */ 1: andi. r14,r11,MSR_PR; /* check for userspace again */ beq kernel_dbg_exc; /* if from kernel mode */ /* Now we mash up things to make it look like we are coming on a * normal exception */ mfspr r14,SPRN_DBSR std r14,_DSISR(r1) ld r14,PACA_EXCRIT+EX_R14(r13) ld r15,PACA_EXCRIT+EX_R15(r13) EXCEPTION_COMMON_CRIT(0xd00) addi r3,r1,STACK_FRAME_OVERHEAD bl DebugException REST_NVGPRS(r1) b interrupt_return kernel_dbg_exc: b . /* NYI */ /* Debug exception as a debug interrupt*/ START_EXCEPTION(debug_debug); DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG, PROLOG_ADDITION_2REGS) /* * If there is a single step or branch-taken exception in an * exception entry sequence, it was probably meant to apply to * the code where the exception occurred (since exception entry * doesn't turn off DE automatically). We simulate the effect * of turning off DE on entry to an exception handler by turning * off DE in the DSRR1 value and clearing the debug status. */ mfspr r14,SPRN_DBSR /* check single-step/branch taken */ andis. r15,r14,(DBSR_IC|DBSR_BT)@h beq+ 1f #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r15) LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e) LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts) cmpld cr0,r10,r14 cmpld cr1,r10,r15 #else LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e) cmpld cr0, r10, r14 LOAD_REG_IMMEDIATE_SYM(r14, r15,__end_interrupts) cmpld cr1, r10, r14 #endif blt+ cr0,1f bge+ cr1,1f /* here it looks like we got an inappropriate debug exception. */ lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ mtspr SPRN_DBSR,r14 mtspr SPRN_DSRR1,r11 lwz r10,PACA_EXDBG+EX_CR(r13) /* restore registers */ ld r1,PACA_EXDBG+EX_R1(r13) ld r14,PACA_EXDBG+EX_R14(r13) ld r15,PACA_EXDBG+EX_R15(r13) mtcr r10 ld r10,PACA_EXDBG+EX_R10(r13) /* restore registers */ ld r11,PACA_EXDBG+EX_R11(r13) mfspr r13,SPRN_SPRG_DBG_SCRATCH rfdi /* Normal debug exception */ /* XXX We only handle coming from userspace for now since we can't * quite save properly an interrupted kernel state yet */ 1: andi. 
r14,r11,MSR_PR; /* check for userspace again */ beq kernel_dbg_exc; /* if from kernel mode */ /* Now we mash up things to make it look like we are coming on a * normal exception */ mfspr r14,SPRN_DBSR std r14,_DSISR(r1) ld r14,PACA_EXDBG+EX_R14(r13) ld r15,PACA_EXDBG+EX_R15(r13) EXCEPTION_COMMON_DBG(0xd08) addi r3,r1,STACK_FRAME_OVERHEAD bl DebugException REST_NVGPRS(r1) b interrupt_return START_EXCEPTION(perfmon); NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x260) CHECK_NAPPING() addi r3,r1,STACK_FRAME_OVERHEAD /* * XXX: Returning from performance_monitor_exception taken as a * soft-NMI (Linux irqs disabled) may be risky to use interrupt_return * and could cause bugs in return or elsewhere. That case should just * restore registers and return. There is a workaround for one known * problem in interrupt_exit_kernel_prepare(). */ bl performance_monitor_exception b interrupt_return /* Doorbell interrupt */ MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, doorbell, doorbell_exception, ACK_NONE) /* Doorbell critical Interrupt */ START_EXCEPTION(doorbell_crit); CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x2a0) bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD bl unknown_nmi_exception b ret_from_crit_except /* * Guest doorbell interrupt * This general exception uses GSRRx save/restore registers */ START_EXCEPTION(guest_doorbell); GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x2c0) addi r3,r1,STACK_FRAME_OVERHEAD bl unknown_exception b interrupt_return /* Guest Doorbell critical Interrupt */ START_EXCEPTION(guest_doorbell_crit); CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x2e0) bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD bl unknown_nmi_exception b ret_from_crit_except /* Hypervisor call */ START_EXCEPTION(hypercall); NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x310) addi r3,r1,STACK_FRAME_OVERHEAD bl unknown_exception b interrupt_return /* Embedded Hypervisor privileged */ START_EXCEPTION(ehpriv); NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x320) addi r3,r1,STACK_FRAME_OVERHEAD bl unknown_exception b interrupt_return /* LRAT Error interrupt */ START_EXCEPTION(lrat_error); NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x340) addi r3,r1,STACK_FRAME_OVERHEAD bl unknown_exception b interrupt_return .macro SEARCH_RESTART_TABLE #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r11) LOAD_REG_ADDR_ALTTOC(r14, r11, __start___restart_table) LOAD_REG_ADDR_ALTTOC(r15, r11, __stop___restart_table) #else LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table) LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table) #endif 300: cmpd r14,r15 beq 302f ld r11,0(r14) cmpld r10,r11 blt 301f ld r11,8(r14) cmpld r10,r11 bge 301f ld r11,16(r14) b 303f 301: addi r14,r14,24 b 300b 302: li r11,0 303: .endm /* * An interrupt came in while soft-disabled; we mark paca->irq_happened * accordingly and, if the interrupt is level sensitive, we hard disable. * Hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so * keep these in sync.
*/ .macro masked_interrupt_book3e paca_irq full_mask std r14,PACA_EXGEN+EX_R14(r13) std r15,PACA_EXGEN+EX_R15(r13) lbz r10,PACAIRQHAPPENED(r13) .if \full_mask == 1 ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS .else ori r10,r10,\paca_irq .endif stb r10,PACAIRQHAPPENED(r13) .if \full_mask == 1 xori r11,r11,MSR_EE /* clear MSR_EE */ mtspr SPRN_SRR1,r11 .endif mfspr r10,SPRN_SRR0 SEARCH_RESTART_TABLE cmpdi r11,0 beq 1f mtspr SPRN_SRR0,r11 /* return to restart address */ 1: lwz r11,PACA_EXGEN+EX_CR(r13) mtcr r11 ld r10,PACA_EXGEN+EX_R10(r13) ld r11,PACA_EXGEN+EX_R11(r13) ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) mfspr r13,SPRN_SPRG_GEN_SCRATCH rfi b . .endm masked_interrupt_book3e_0x500: masked_interrupt_book3e PACA_IRQ_EE 1 masked_interrupt_book3e_0x900: ACK_DEC(r10); masked_interrupt_book3e PACA_IRQ_DEC 0 masked_interrupt_book3e_0x980: ACK_FIT(r10); masked_interrupt_book3e PACA_IRQ_DEC 0 masked_interrupt_book3e_0x280: masked_interrupt_book3e_0x2c0: masked_interrupt_book3e PACA_IRQ_DBELL 0 /* * This is called from 0x300 and 0x400 handlers after the prologs with * r14 and r15 containing the fault address and error code, with the * original values stashed away in the PACA */ storage_fault_common: addi r3,r1,STACK_FRAME_OVERHEAD bl do_page_fault b interrupt_return /* * Alignment exception doesn't fit entirely in the 0x100 bytes so it * continues here. */ alignment_more: addi r3,r1,STACK_FRAME_OVERHEAD bl alignment_exception REST_NVGPRS(r1) b interrupt_return /* * Trampolines used when spotting a bad kernel stack pointer in * the exception entry code. * * TODO: move some bits like SRR0 read to trampoline, pass PACA * index around, etc... to handle crit & mcheck */ BAD_STACK_TRAMPOLINE(0x000) BAD_STACK_TRAMPOLINE(0x100) BAD_STACK_TRAMPOLINE(0x200) BAD_STACK_TRAMPOLINE(0x220) BAD_STACK_TRAMPOLINE(0x260) BAD_STACK_TRAMPOLINE(0x280) BAD_STACK_TRAMPOLINE(0x2a0) BAD_STACK_TRAMPOLINE(0x2c0) BAD_STACK_TRAMPOLINE(0x2e0) BAD_STACK_TRAMPOLINE(0x300) BAD_STACK_TRAMPOLINE(0x310) BAD_STACK_TRAMPOLINE(0x320) BAD_STACK_TRAMPOLINE(0x340) BAD_STACK_TRAMPOLINE(0x400) BAD_STACK_TRAMPOLINE(0x500) BAD_STACK_TRAMPOLINE(0x600) BAD_STACK_TRAMPOLINE(0x700) BAD_STACK_TRAMPOLINE(0x800) BAD_STACK_TRAMPOLINE(0x900) BAD_STACK_TRAMPOLINE(0x980) BAD_STACK_TRAMPOLINE(0x9f0) BAD_STACK_TRAMPOLINE(0xa00) BAD_STACK_TRAMPOLINE(0xb00) BAD_STACK_TRAMPOLINE(0xc00) BAD_STACK_TRAMPOLINE(0xd00) BAD_STACK_TRAMPOLINE(0xd08) BAD_STACK_TRAMPOLINE(0xe00) BAD_STACK_TRAMPOLINE(0xf00) BAD_STACK_TRAMPOLINE(0xf20) .globl bad_stack_book3e bad_stack_book3e: /* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */ mfspr r10,SPRN_SRR0; /* read SRR0 before touching stack */ ld r1,PACAEMERGSP(r13) subi r1,r1,64+INT_FRAME_SIZE std r10,_NIP(r1) std r11,_MSR(r1) ld r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */ lwz r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */ std r10,GPR1(r1) std r11,_CCR(r1) mfspr r10,SPRN_DEAR mfspr r11,SPRN_ESR std r10,_DEAR(r1) std r11,_ESR(r1) SAVE_GPR(0, r1); /* save r0 in stackframe */ \ SAVE_GPRS(2, 9, r1); /* save r2 - r9 in stackframe */ \ ld r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */ \ ld r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */ \ mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \ std r3,GPR10(r1); /* save r10 to stackframe */ \ std r4,GPR11(r1); /* save r11 to stackframe */ \ SAVE_GPR(12, r1); /* save r12 in stackframe */ \ std r5,GPR13(r1); /* save it to stackframe */ \ mflr r10 mfctr r11 mfxer r12 std r10,_LINK(r1) std r11,_CTR(r1) std r12,_XER(r1) SAVE_NVGPRS(r1) 
lhz r12,PACA_TRAP_SAVE(r13) std r12,_TRAP(r1) addi r11,r1,INT_FRAME_SIZE std r11,0(r1) ZEROIZE_GPR(12) std r12,0(r11) LOAD_PACA_TOC() 1: addi r3,r1,STACK_FRAME_OVERHEAD bl kernel_bad_stack b 1b /* * Setup the initial TLB for a core. This current implementation * assume that whatever we are running off will not conflict with * the new mapping at PAGE_OFFSET. */ _GLOBAL(initial_tlb_book3e) /* Look for the first TLB with IPROT set */ mfspr r4,SPRN_TLB0CFG andi. r3,r4,TLBnCFG_IPROT lis r3,MAS0_TLBSEL(0)@h bne found_iprot mfspr r4,SPRN_TLB1CFG andi. r3,r4,TLBnCFG_IPROT lis r3,MAS0_TLBSEL(1)@h bne found_iprot mfspr r4,SPRN_TLB2CFG andi. r3,r4,TLBnCFG_IPROT lis r3,MAS0_TLBSEL(2)@h bne found_iprot lis r3,MAS0_TLBSEL(3)@h mfspr r4,SPRN_TLB3CFG /* fall through */ found_iprot: andi. r5,r4,TLBnCFG_HES bne have_hes mflr r8 /* save LR */ /* 1. Find the index of the entry we're executing in * * r3 = MAS0_TLBSEL (for the iprot array) * r4 = SPRN_TLBnCFG */ bcl 20,31,$+4 /* Find our address */ invstr: mflr r6 /* Make it accessible */ mfmsr r7 rlwinm r5,r7,27,31,31 /* extract MSR[IS] */ mfspr r7,SPRN_PID slwi r7,r7,16 or r7,r7,r5 mtspr SPRN_MAS6,r7 tlbsx 0,r6 /* search MSR[IS], SPID=PID */ mfspr r3,SPRN_MAS0 rlwinm r5,r3,16,20,31 /* Extract MAS0(Entry) */ mfspr r7,SPRN_MAS1 /* Insure IPROT set */ oris r7,r7,MAS1_IPROT@h mtspr SPRN_MAS1,r7 tlbwe /* 2. Invalidate all entries except the entry we're executing in * * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in * r4 = SPRN_TLBnCFG * r5 = ESEL of entry we are running in */ andi. r4,r4,TLBnCFG_N_ENTRY /* Extract # entries */ li r6,0 /* Set Entry counter to 0 */ 1: mr r7,r3 /* Set MAS0(TLBSEL) */ rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ mtspr SPRN_MAS0,r7 tlbre mfspr r7,SPRN_MAS1 rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ cmpw r5,r6 beq skpinv /* Dont update the current execution TLB */ mtspr SPRN_MAS1,r7 tlbwe isync skpinv: addi r6,r6,1 /* Increment */ cmpw r6,r4 /* Are we done? */ bne 1b /* If not, repeat */ /* Invalidate all TLBs */ PPC_TLBILX_ALL(0,R0) sync isync /* 3. Setup a temp mapping and jump to it * * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in * r5 = ESEL of entry we are running in */ andi. r7,r5,0x1 /* Find an entry not used and is non-zero */ addi r7,r7,0x1 mr r4,r3 /* Set MAS0(TLBSEL) = 1 */ mtspr SPRN_MAS0,r4 tlbre rlwimi r4,r7,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r7) */ mtspr SPRN_MAS0,r4 mfspr r7,SPRN_MAS1 xori r6,r7,MAS1_TS /* Setup TMP mapping in the other Address space */ mtspr SPRN_MAS1,r6 tlbwe mfmsr r6 xori r6,r6,MSR_IS mtspr SPRN_SRR1,r6 bcl 20,31,$+4 /* Find our address */ 1: mflr r6 addi r6,r6,(2f - 1b) mtspr SPRN_SRR0,r6 rfi 2: /* 4. Clear out PIDs & Search info * * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping * r5 = MAS3 */ li r6,0 mtspr SPRN_MAS6,r6 mtspr SPRN_PID,r6 /* 5. Invalidate mapping we started in * * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping * r5 = MAS3 */ mtspr SPRN_MAS0,r3 tlbre mfspr r6,SPRN_MAS1 rlwinm r6,r6,0,2,31 /* clear IPROT and VALID */ mtspr SPRN_MAS1,r6 tlbwe sync isync /* 6. 
Setup KERNELBASE mapping in TLB[0] * * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping * r5 = MAS3 */ rlwinm r3,r3,0,16,3 /* clear ESEL */ mtspr SPRN_MAS0,r3 lis r6,(MAS1_VALID|MAS1_IPROT)@h ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l mtspr SPRN_MAS1,r6 LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED) mtspr SPRN_MAS2,r6 rlwinm r5,r5,0,0,25 ori r5,r5,MAS3_SR | MAS3_SW | MAS3_SX mtspr SPRN_MAS3,r5 li r5,-1 rlwinm r5,r5,0,0,25 tlbwe /* 7. Jump to KERNELBASE mapping * * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping */ /* Now we branch the new virtual address mapped by this entry */ bcl 20,31,$+4 /* Find our address */ 1: mflr r6 addi r6,r6,(2f - 1b) tovirt(r6,r6) lis r7,MSR_KERNEL@h ori r7,r7,MSR_KERNEL@l mtspr SPRN_SRR0,r6 mtspr SPRN_SRR1,r7 rfi /* start execution out of TLB1[0] entry */ 2: /* 8. Clear out the temp mapping * * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in */ mtspr SPRN_MAS0,r4 tlbre mfspr r5,SPRN_MAS1 rlwinm r5,r5,0,2,31 /* clear IPROT and VALID */ mtspr SPRN_MAS1,r5 tlbwe sync isync /* We translate LR and return */ tovirt(r8,r8) mtlr r8 blr have_hes: /* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the * kernel linear mapping. We also set MAS8 once for all here though * that will have to be made dependent on whether we are running under * a hypervisor I suppose. */ /* BEWARE, MAGIC * This code is called as an ordinary function on the boot CPU. But to * avoid duplication, this code is also used in SCOM bringup of * secondary CPUs. We read the code between the initial_tlb_code_start * and initial_tlb_code_end labels one instruction at a time and RAM it * into the new core via SCOM. That doesn't process branches, so there * must be none between those two labels. It also means if this code * ever takes any parameters, the SCOM code must also be updated to * provide them. */ .globl a2_tlbinit_code_start a2_tlbinit_code_start: ori r11,r3,MAS0_WQ_ALLWAYS oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */ mtspr SPRN_MAS0,r11 lis r3,(MAS1_VALID | MAS1_IPROT)@h ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT mtspr SPRN_MAS1,r3 LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M) mtspr SPRN_MAS2,r3 li r3,MAS3_SR | MAS3_SW | MAS3_SX mtspr SPRN_MAS7_MAS3,r3 li r3,0 mtspr SPRN_MAS8,r3 /* Write the TLB entry */ tlbwe .globl a2_tlbinit_after_linear_map a2_tlbinit_after_linear_map: /* Now we branch the new virtual address mapped by this entry */ #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r5) LOAD_REG_ADDR_ALTTOC(r3, r5, 1f) #else LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f) #endif mtctr r3 bctr 1: /* We are now running at PAGE_OFFSET, clean the TLB of everything * else (including IPROTed things left by firmware) * r4 = TLBnCFG * r3 = current address (more or less) */ li r5,0 mtspr SPRN_MAS6,r5 tlbsx 0,r3 rlwinm r9,r4,0,TLBnCFG_N_ENTRY rlwinm r10,r4,8,0xff addi r10,r10,-1 /* Get inner loop mask */ li r3,1 mfspr r5,SPRN_MAS1 rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT)) mfspr r6,SPRN_MAS2 rldicr r6,r6,0,51 /* Extract EPN */ mfspr r7,SPRN_MAS0 rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */ rlwinm r8,r7,16,0xfff /* Extract ESEL */ 2: add r4,r3,r8 and r4,r4,r10 rlwimi r7,r4,16,MAS0_ESEL_MASK mtspr SPRN_MAS0,r7 mtspr SPRN_MAS1,r5 mtspr SPRN_MAS2,r6 tlbwe addi r3,r3,1 and. 
r4,r3,r10 bne 3f addis r6,r6,(1<<30)@h 3: cmpw r3,r9 blt 2b .globl a2_tlbinit_after_iprot_flush a2_tlbinit_after_iprot_flush: PPC_TLBILX(0,0,R0) sync isync .globl a2_tlbinit_code_end a2_tlbinit_code_end: /* We translate LR and return */ mflr r3 tovirt(r3,r3) mtlr r3 blr /* * Main entry (boot CPU, thread 0) * * We enter here from head_64.S, possibly after the prom_init trampoline * with r3 and r4 already saved to r31 and 30 respectively and in 64 bits * mode. Anything else is as it was left by the bootloader * * Initial requirements of this port: * * - Kernel loaded at 0 physical * - A good lump of memory mapped 0:0 by UTLB entry 0 * - MSR:IS & MSR:DS set to 0 * * Note that some of the above requirements will be relaxed in the future * as the kernel becomes smarter at dealing with different initial conditions * but for now you have to be careful */ _GLOBAL(start_initialization_book3e) mflr r28 /* First, we need to setup some initial TLBs to map the kernel * text, data and bss at PAGE_OFFSET. We don't have a real mode * and always use AS 0, so we just set it up to match our link * address and never use 0 based addresses. */ bl initial_tlb_book3e /* Init global core bits */ bl init_core_book3e /* Init per-thread bits */ bl init_thread_book3e /* Return to common init code */ tovirt(r28,r28) mtlr r28 blr /* * Secondary core/processor entry * * This is entered for thread 0 of a secondary core, all other threads * are expected to be stopped. It's similar to start_initialization_book3e * except that it's generally entered from the holding loop in head_64.S * after CPUs have been gathered by Open Firmware. * * We assume we are in 32 bits mode running with whatever TLB entry was * set for us by the firmware or POR engine. */ _GLOBAL(book3e_secondary_core_init_tlb_set) li r4,1 b generic_secondary_smp_init _GLOBAL(book3e_secondary_core_init) mflr r28 /* Do we need to setup initial TLB entry ? */ cmplwi r4,0 bne 2f /* Setup TLB for this core */ bl initial_tlb_book3e /* We can return from the above running at a different * address, so recalculate r2 (TOC) */ bl relative_toc /* Init global core bits */ 2: bl init_core_book3e /* Init per-thread bits */ 3: bl init_thread_book3e /* Return to common init code at proper virtual address. * * Due to various previous assumptions, we know we entered this * function at either the final PAGE_OFFSET mapping or using a * 1:1 mapping at 0, so we don't bother doing a complicated check * here, we just ensure the return address has the right top bits. * * Note that if we ever want to be smarter about where we can be * started from, we have to be careful that by the time we reach * the code below we may already be running at a different location * than the one we were called from since initial_tlb_book3e can * have moved us already. 
*/ cmpdi cr0,r28,0 blt 1f lis r3,PAGE_OFFSET@highest sldi r3,r3,32 or r28,r28,r3 1: mtlr r28 blr _GLOBAL(book3e_secondary_thread_init) mflr r28 b 3b .globl init_core_book3e init_core_book3e: /* Establish the interrupt vector base */ tovirt(r2,r2) LOAD_REG_ADDR(r3, interrupt_base_book3e) mtspr SPRN_IVPR,r3 sync blr init_thread_book3e: lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h mtspr SPRN_EPCR,r3 /* Make sure interrupts are off */ wrteei 0 /* disable all timers and clear out status */ li r3,0 mtspr SPRN_TCR,r3 mfspr r3,SPRN_TSR mtspr SPRN_TSR,r3 blr _GLOBAL(__setup_base_ivors) SET_IVOR(0, 0x020) /* Critical Input */ SET_IVOR(1, 0x000) /* Machine Check */ SET_IVOR(2, 0x060) /* Data Storage */ SET_IVOR(3, 0x080) /* Instruction Storage */ SET_IVOR(4, 0x0a0) /* External Input */ SET_IVOR(5, 0x0c0) /* Alignment */ SET_IVOR(6, 0x0e0) /* Program */ SET_IVOR(7, 0x100) /* FP Unavailable */ SET_IVOR(8, 0x120) /* System Call */ SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */ SET_IVOR(10, 0x160) /* Decrementer */ SET_IVOR(11, 0x180) /* Fixed Interval Timer */ SET_IVOR(12, 0x1a0) /* Watchdog Timer */ SET_IVOR(13, 0x1c0) /* Data TLB Error */ SET_IVOR(14, 0x1e0) /* Instruction TLB Error */ SET_IVOR(15, 0x040) /* Debug */ sync blr _GLOBAL(setup_altivec_ivors) SET_IVOR(32, 0x200) /* AltiVec Unavailable */ SET_IVOR(33, 0x220) /* AltiVec Assist */ blr _GLOBAL(setup_perfmon_ivor) SET_IVOR(35, 0x260) /* Performance Monitor */ blr _GLOBAL(setup_doorbell_ivors) SET_IVOR(36, 0x280) /* Processor Doorbell */ SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */ blr _GLOBAL(setup_ehv_ivors) SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */ SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */ SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */ SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */ blr _GLOBAL(setup_lrat_ivor) SET_IVOR(42, 0x340) /* LRAT Error */ blr
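/*
 * Dispatch model for the IVOR tables above: IVPR holds the base
 * (interrupt_base_book3e, set in init_core_book3e) and each IVORn holds
 * the offset of the matching EXCEPTION_STUB, so the hardware vectors to
 * base + offset; each stub is a nop plus a branch to the real handler.
 * An illustrative C sketch (hypothetical names):
 *
 *	static unsigned long vector_address_model(struct cpu_model *c, int n)
 *	{
 *		return c->ivpr + c->ivor[n];	// stub entries, 0x20 apart
 *	}
 */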
aixcc-public/challenge-001-exemplar-source
5,148
arch/powerpc/kernel/swsusp_asm64.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * PowerPC 64-bit swsusp implementation * * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> */ #include <linux/threads.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/feature-fixups.h> /* * Structure for storing CPU registers on the save area. */ #define SL_r1 0x00 /* stack pointer */ #define SL_PC 0x08 #define SL_MSR 0x10 #define SL_SDR1 0x18 #define SL_XER 0x20 #define SL_TB 0x40 #define SL_r2 0x48 #define SL_CR 0x50 #define SL_LR 0x58 #define SL_r12 0x60 #define SL_r13 0x68 #define SL_r14 0x70 #define SL_r15 0x78 #define SL_r16 0x80 #define SL_r17 0x88 #define SL_r18 0x90 #define SL_r19 0x98 #define SL_r20 0xa0 #define SL_r21 0xa8 #define SL_r22 0xb0 #define SL_r23 0xb8 #define SL_r24 0xc0 #define SL_r25 0xc8 #define SL_r26 0xd0 #define SL_r27 0xd8 #define SL_r28 0xe0 #define SL_r29 0xe8 #define SL_r30 0xf0 #define SL_r31 0xf8 #define SL_SPRG1 0x100 #define SL_TCR 0x108 #define SL_SIZE SL_TCR+8 /* these macros rely on the save area being * pointed to by r11 */ #define SAVE_SPR(register) \ mfspr r0, SPRN_##register ;\ std r0, SL_##register(r11) #define RESTORE_SPR(register) \ ld r0, SL_##register(r11) ;\ mtspr SPRN_##register, r0 #define SAVE_SPECIAL(special) \ mf##special r0 ;\ std r0, SL_##special(r11) #define RESTORE_SPECIAL(special) \ ld r0, SL_##special(r11) ;\ mt##special r0 #define SAVE_REGISTER(reg) \ std reg, SL_##reg(r11) #define RESTORE_REGISTER(reg) \ ld reg, SL_##reg(r11) /* space for storing cpu state */ .section .data .align 5 swsusp_save_area: .space SL_SIZE .section .text .align 5 _GLOBAL(swsusp_arch_suspend) LOAD_REG_ADDR(r11, swsusp_save_area) SAVE_SPECIAL(LR) SAVE_REGISTER(r1) SAVE_SPECIAL(CR) SAVE_SPECIAL(TB) SAVE_REGISTER(r2) SAVE_REGISTER(r12) SAVE_REGISTER(r13) SAVE_REGISTER(r14) SAVE_REGISTER(r15) SAVE_REGISTER(r16) SAVE_REGISTER(r17) SAVE_REGISTER(r18) SAVE_REGISTER(r19) SAVE_REGISTER(r20) SAVE_REGISTER(r21) SAVE_REGISTER(r22) SAVE_REGISTER(r23) SAVE_REGISTER(r24) SAVE_REGISTER(r25) SAVE_REGISTER(r26) SAVE_REGISTER(r27) SAVE_REGISTER(r28) SAVE_REGISTER(r29) SAVE_REGISTER(r30) SAVE_REGISTER(r31) SAVE_SPECIAL(MSR) SAVE_SPECIAL(XER) #ifdef CONFIG_PPC_BOOK3S_64 BEGIN_FW_FTR_SECTION SAVE_SPECIAL(SDR1) END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR) #else SAVE_SPR(TCR) /* Save SPRG1; SPRG1 is used to save the paca */ SAVE_SPR(SPRG1) #endif /* we push the stack up 128 bytes but don't store the * stack pointer on the stack like a real stackframe */ addi r1,r1,-128 bl swsusp_save /* restore LR */ LOAD_REG_ADDR(r11, swsusp_save_area) RESTORE_SPECIAL(LR) addi r1,r1,128 blr /* Resume code */ _GLOBAL(swsusp_arch_resume) /* Stop pending altivec streams and memory accesses */ BEGIN_FTR_SECTION PPC_DSSALL END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) sync LOAD_REG_ADDR(r11, restore_pblist) ld r12,0(r11) cmpdi r12,0 beq- nothing_to_copy li r15,PAGE_SIZE>>3 copyloop: ld r13,pbe_address(r12) ld r14,pbe_orig_address(r12) mtctr r15 li r10,0 copy_page_loop: ldx r0,r10,r13 stdx r0,r10,r14 addi r10,r10,8 bdnz copy_page_loop ld r12,pbe_next(r12) cmpdi r12,0 bne+ copyloop nothing_to_copy: #ifdef CONFIG_PPC_BOOK3S_64 /* flush caches */ lis r3, 0x10 mtctr r3 li r3, 0 ori r3, r3, CONFIG_KERNEL_START>>48 li r0, 48 sld r3, r3, r0 li r0, 0 1: dcbf 0,r3 addi r3,r3,0x20 bdnz 1b sync tlbia #endif LOAD_REG_ADDR(r11, swsusp_save_area) RESTORE_SPECIAL(CR) /* restore timebase */ /* load saved tb */ ld r1, SL_TB(r11) /* get upper 32 bits of
it */ srdi r2, r1, 32 /* clear tb lower to avoid wrap */ li r0, 0 mttbl r0 /* set tb upper */ mttbu r2 /* set tb lower */ mttbl r1 /* restore registers */ RESTORE_REGISTER(r1) RESTORE_REGISTER(r2) RESTORE_REGISTER(r12) RESTORE_REGISTER(r13) RESTORE_REGISTER(r14) RESTORE_REGISTER(r15) RESTORE_REGISTER(r16) RESTORE_REGISTER(r17) RESTORE_REGISTER(r18) RESTORE_REGISTER(r19) RESTORE_REGISTER(r20) RESTORE_REGISTER(r21) RESTORE_REGISTER(r22) RESTORE_REGISTER(r23) RESTORE_REGISTER(r24) RESTORE_REGISTER(r25) RESTORE_REGISTER(r26) RESTORE_REGISTER(r27) RESTORE_REGISTER(r28) RESTORE_REGISTER(r29) RESTORE_REGISTER(r30) RESTORE_REGISTER(r31) #ifdef CONFIG_PPC_BOOK3S_64 /* can't use RESTORE_SPECIAL(MSR) */ ld r0, SL_MSR(r11) mtmsrd r0, 0 BEGIN_FW_FTR_SECTION RESTORE_SPECIAL(SDR1) END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR) #else /* Restore SPRG1; it is used to save the paca */ ld r0, SL_SPRG1(r11) mtsprg 1, r0 RESTORE_SPECIAL(MSR) /* Restore TCR and clear any pending bits in TSR. */ RESTORE_SPR(TCR) lis r0, (TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS)@h mtspr SPRN_TSR, r0 /* Kick decrementer */ li r0, 1 mtdec r0 /* Invalidate all tlbs */ bl _tlbil_all #endif RESTORE_SPECIAL(XER) sync addi r1,r1,-128 #ifdef CONFIG_PPC_BOOK3S_64 bl slb_flush_and_restore_bolted #endif bl do_after_copyback addi r1,r1,128 LOAD_REG_ADDR(r11, swsusp_save_area) RESTORE_SPECIAL(LR) li r3, 0 blr
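The resume path above walks restore_pblist and copies each saved page back over its original frame, one doubleword per iteration of copy_page_loop. A minimal C sketch of that walk, with simplified types (the field names mirror the pbe_address/pbe_orig_address/pbe_next offsets used by the asm; the real struct pbe lives in <linux/suspend.h>):

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096

struct pbe {
	void *address;        /* address of the saved copy */
	void *orig_address;   /* frame the page must go back to */
	struct pbe *next;
};

static void restore_pages(const struct pbe *restore_pblist)
{
	for (const struct pbe *p = restore_pblist; p; p = p->next) {
		const uint64_t *src = p->address;
		uint64_t *dst = p->orig_address;

		/* PAGE_SIZE>>3 doublewords, as in "li r15,PAGE_SIZE>>3" */
		for (size_t i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++)
			dst[i] = src[i];
	}
}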
aixcc-public/challenge-001-exemplar-source
8,167
arch/powerpc/kernel/kvm_emul.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright SUSE Linux Products GmbH 2010 * Copyright 2010-2011 Freescale Semiconductor, Inc. * * Authors: Alexander Graf <agraf@suse.de> */ #include <asm/ppc_asm.h> #include <asm/kvm_asm.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/asm-offsets.h> #include <asm/asm-compat.h> #define KVM_MAGIC_PAGE (-4096) #ifdef CONFIG_64BIT #define LL64(reg, offs, reg2) ld reg, (offs)(reg2) #define STL64(reg, offs, reg2) std reg, (offs)(reg2) #else #define LL64(reg, offs, reg2) lwz reg, (offs + 4)(reg2) #define STL64(reg, offs, reg2) stw reg, (offs + 4)(reg2) #endif #define SCRATCH_SAVE \ /* Enable critical section. We are critical if \ shared->critical == r1 */ \ STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); \ \ /* Save state */ \ PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \ PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \ mfcr r31; \ stw r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); #define SCRATCH_RESTORE \ /* Restore state */ \ PPC_LL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \ lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); \ mtcr r30; \ PPC_LL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \ \ /* Disable critical section. We are critical if \ shared->critical == r1 and r2 is always != r1 */ \ STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); .global kvm_template_start kvm_template_start: .global kvm_emulate_mtmsrd kvm_emulate_mtmsrd: SCRATCH_SAVE /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */ LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) lis r30, (~(MSR_EE | MSR_RI))@h ori r30, r30, (~(MSR_EE | MSR_RI))@l and r31, r31, r30 /* OR the register's (MSR_EE|MSR_RI) on MSR */ kvm_emulate_mtmsrd_reg: ori r30, r0, 0 andi. r30, r30, (MSR_EE|MSR_RI) or r31, r31, r30 /* Put MSR back into magic page */ STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) /* Check if we have to fetch an interrupt */ lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) cmpwi r31, 0 beq+ no_check /* Check if we may trigger an interrupt */ andi. r30, r30, MSR_EE beq no_check SCRATCH_RESTORE /* Nag hypervisor */ kvm_emulate_mtmsrd_orig_ins: tlbsync b kvm_emulate_mtmsrd_branch no_check: SCRATCH_RESTORE /* Go back to caller */ kvm_emulate_mtmsrd_branch: b . kvm_emulate_mtmsrd_end: .global kvm_emulate_mtmsrd_branch_offs kvm_emulate_mtmsrd_branch_offs: .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4 .global kvm_emulate_mtmsrd_reg_offs kvm_emulate_mtmsrd_reg_offs: .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4 .global kvm_emulate_mtmsrd_orig_ins_offs kvm_emulate_mtmsrd_orig_ins_offs: .long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4 .global kvm_emulate_mtmsrd_len kvm_emulate_mtmsrd_len: .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4 #define MSR_SAFE_BITS (MSR_EE | MSR_RI) #define MSR_CRITICAL_BITS ~MSR_SAFE_BITS .global kvm_emulate_mtmsr kvm_emulate_mtmsr: SCRATCH_SAVE /* Fetch old MSR in r31 */ LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) /* Find the changed bits between old and new MSR */ kvm_emulate_mtmsr_reg1: ori r30, r0, 0 xor r31, r30, r31 /* Check if we need to really do mtmsr */ LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS) and. r31, r31, r30 /* No critical bits changed? Maybe we can stay in the guest. 
*/ beq maybe_stay_in_guest do_mtmsr: SCRATCH_RESTORE /* Just fire off the mtmsr if it's critical */ kvm_emulate_mtmsr_orig_ins: mtmsr r0 b kvm_emulate_mtmsr_branch maybe_stay_in_guest: /* Get the target register in r30 */ kvm_emulate_mtmsr_reg2: ori r30, r0, 0 /* Put MSR into magic page because we don't call mtmsr */ STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) /* Check if we have to fetch an interrupt */ lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) cmpwi r31, 0 beq+ no_mtmsr /* Check if we may trigger an interrupt */ andi. r31, r30, MSR_EE bne do_mtmsr no_mtmsr: SCRATCH_RESTORE /* Go back to caller */ kvm_emulate_mtmsr_branch: b . kvm_emulate_mtmsr_end: .global kvm_emulate_mtmsr_branch_offs kvm_emulate_mtmsr_branch_offs: .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4 .global kvm_emulate_mtmsr_reg1_offs kvm_emulate_mtmsr_reg1_offs: .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4 .global kvm_emulate_mtmsr_reg2_offs kvm_emulate_mtmsr_reg2_offs: .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4 .global kvm_emulate_mtmsr_orig_ins_offs kvm_emulate_mtmsr_orig_ins_offs: .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4 .global kvm_emulate_mtmsr_len kvm_emulate_mtmsr_len: .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4 #ifdef CONFIG_BOOKE /* also used for wrteei 1 */ .global kvm_emulate_wrtee kvm_emulate_wrtee: SCRATCH_SAVE /* Fetch old MSR in r31 */ LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) /* Insert new MSR[EE] */ kvm_emulate_wrtee_reg: ori r30, r0, 0 rlwimi r31, r30, 0, MSR_EE /* * If MSR[EE] is now set, check for a pending interrupt. * We could skip this if MSR[EE] was already on, but that * should be rare, so don't bother. */ andi. r30, r30, MSR_EE /* Put MSR into magic page because we don't call wrtee */ STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) beq no_wrtee /* Check if we have to fetch an interrupt */ lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) cmpwi r30, 0 bne do_wrtee no_wrtee: SCRATCH_RESTORE /* Go back to caller */ kvm_emulate_wrtee_branch: b . do_wrtee: SCRATCH_RESTORE /* Just fire off the wrtee if it's critical */ kvm_emulate_wrtee_orig_ins: wrtee r0 b kvm_emulate_wrtee_branch kvm_emulate_wrtee_end: .global kvm_emulate_wrtee_branch_offs kvm_emulate_wrtee_branch_offs: .long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4 .global kvm_emulate_wrtee_reg_offs kvm_emulate_wrtee_reg_offs: .long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4 .global kvm_emulate_wrtee_orig_ins_offs kvm_emulate_wrtee_orig_ins_offs: .long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4 .global kvm_emulate_wrtee_len kvm_emulate_wrtee_len: .long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4 .global kvm_emulate_wrteei_0 kvm_emulate_wrteei_0: SCRATCH_SAVE /* Fetch old MSR in r31 */ LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) /* Remove MSR_EE from old MSR */ rlwinm r31, r31, 0, ~MSR_EE /* Write new MSR value back */ STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) SCRATCH_RESTORE /* Go back to caller */ kvm_emulate_wrteei_0_branch: b . kvm_emulate_wrteei_0_end: .global kvm_emulate_wrteei_0_branch_offs kvm_emulate_wrteei_0_branch_offs: .long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4 .global kvm_emulate_wrteei_0_len kvm_emulate_wrteei_0_len: .long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4 #endif /* CONFIG_BOOKE */ #ifdef CONFIG_PPC_BOOK3S_32 .global kvm_emulate_mtsrin kvm_emulate_mtsrin: SCRATCH_SAVE LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) andi. 
r31, r31, MSR_DR | MSR_IR beq kvm_emulate_mtsrin_reg1 SCRATCH_RESTORE kvm_emulate_mtsrin_orig_ins: nop b kvm_emulate_mtsrin_branch kvm_emulate_mtsrin_reg1: /* rX >> 26 */ rlwinm r30,r0,6,26,29 kvm_emulate_mtsrin_reg2: stw r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30) SCRATCH_RESTORE /* Go back to caller */ kvm_emulate_mtsrin_branch: b . kvm_emulate_mtsrin_end: .global kvm_emulate_mtsrin_branch_offs kvm_emulate_mtsrin_branch_offs: .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4 .global kvm_emulate_mtsrin_reg1_offs kvm_emulate_mtsrin_reg1_offs: .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4 .global kvm_emulate_mtsrin_reg2_offs kvm_emulate_mtsrin_reg2_offs: .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4 .global kvm_emulate_mtsrin_orig_ins_offs kvm_emulate_mtsrin_orig_ins_offs: .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4 .global kvm_emulate_mtsrin_len kvm_emulate_mtsrin_len: .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4 #endif /* CONFIG_PPC_BOOK3S_32 */ .balign 4 .global kvm_tmp kvm_tmp: .space (64 * 1024) .global kvm_tmp_end kvm_tmp_end: .global kvm_template_end kvm_template_end:
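The mtmsrd template above only lets the guest's MSR_EE and MSR_RI bits through; everything else in the magic-page MSR is preserved, and an interrupt check fires when EE comes back on. The bit arithmetic, as a small C sketch (bit values as on classic 32-bit PPC, shown for illustration only):

#include <stdint.h>

#define MSR_EE 0x8000u  /* external interrupt enable */
#define MSR_RI 0x0002u  /* recoverable interrupt */

static uint32_t emulate_mtmsrd_bits(uint32_t magic_msr, uint32_t guest_reg)
{
	/* "and r31, r31, r30" then "or r31, r31, r30" in the template:
	 * keep everything but EE/RI, then adopt the guest's EE/RI. */
	return (magic_msr & ~(MSR_EE | MSR_RI)) | (guest_reg & (MSR_EE | MSR_RI));
}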
aixcc-public/challenge-001-exemplar-source
13,442
arch/powerpc/kernel/entry_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com> * Adapted for Power Macintosh by Paul Mackerras. * Low-level exception handlers and MMU support * rewritten by Paul Mackerras. * Copyright (C) 1996 Paul Mackerras. * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). * * This file contains the system call entry code, context switch * code, and exception/interrupt return code for PowerPC. */ #include <linux/errno.h> #include <linux/err.h> #include <linux/sys.h> #include <linux/threads.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/ptrace.h> #include <asm/export.h> #include <asm/feature-fixups.h> #include <asm/barrier.h> #include <asm/kup.h> #include <asm/bug.h> #include <asm/interrupt.h> #include "head_32.h" /* * powerpc relies on return from interrupt/syscall being context synchronising * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional * synchronisation instructions. */ /* * Align to 4k in order to ensure that all functions modifying srr0/srr1 * fit into one page in order to not encounter a TLB miss between the * modification of srr0/srr1 and the associated rfi. */ .align 12 #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500) .globl prepare_transfer_to_handler prepare_transfer_to_handler: /* if from kernel, check interrupted DOZE/NAP mode */ lwz r12,TI_LOCAL_FLAGS(r2) mtcrf 0x01,r12 bt- 31-TLF_NAPPING,4f bt- 31-TLF_SLEEPING,7f blr 4: rlwinm r12,r12,0,~_TLF_NAPPING stw r12,TI_LOCAL_FLAGS(r2) b power_save_ppc32_restore 7: rlwinm r12,r12,0,~_TLF_SLEEPING stw r12,TI_LOCAL_FLAGS(r2) lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */ rlwinm r9,r9,0,~MSR_EE lwz r12,_LINK(r11) /* and return to address in LR */ REST_GPR(2, r11) b fast_exception_return _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler) #endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */ #if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32) .globl __kuep_lock __kuep_lock: lwz r9, THREAD+THSR0(r2) update_user_segments_by_4 r9, r10, r11, r12 blr __kuep_unlock: lwz r9, THREAD+THSR0(r2) rlwinm r9,r9,0,~SR_NX update_user_segments_by_4 r9, r10, r11, r12 blr .macro kuep_lock bl __kuep_lock .endm .macro kuep_unlock bl __kuep_unlock .endm #else .macro kuep_lock .endm .macro kuep_unlock .endm #endif .globl transfer_to_syscall transfer_to_syscall: stw r3, ORIG_GPR3(r1) stw r11, GPR1(r1) stw r11, 0(r1) mflr r12 stw r12, _LINK(r1) #ifdef CONFIG_BOOKE_OR_40x rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?)
*/ #endif lis r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */ SAVE_GPR(2, r1) addi r12,r12,STACK_FRAME_REGS_MARKER@l stw r9,_MSR(r1) li r2, INTERRUPT_SYSCALL stw r12,8(r1) stw r2,_TRAP(r1) SAVE_GPR(0, r1) SAVE_GPRS(3, 8, r1) addi r2,r10,-THREAD SAVE_NVGPRS(r1) kuep_lock /* Calling convention has r3 = regs, r4 = orig r0 */ addi r3,r1,STACK_FRAME_OVERHEAD mr r4,r0 bl system_call_exception ret_from_syscall: addi r4,r1,STACK_FRAME_OVERHEAD li r5,0 bl syscall_exit_prepare #ifdef CONFIG_PPC_47x lis r4,icache_44x_need_flush@ha lwz r5,icache_44x_need_flush@l(r4) cmplwi cr0,r5,0 bne- 2f #endif /* CONFIG_PPC_47x */ kuep_unlock lwz r4,_LINK(r1) lwz r5,_CCR(r1) mtlr r4 lwz r7,_NIP(r1) lwz r8,_MSR(r1) cmpwi r3,0 REST_GPR(3, r1) syscall_exit_finish: mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 bne 3f mtcr r5 1: REST_GPR(2, r1) REST_GPR(1, r1) rfi #ifdef CONFIG_40x b . /* Prevent prefetch past rfi */ #endif 3: mtcr r5 lwz r4,_CTR(r1) lwz r5,_XER(r1) REST_NVGPRS(r1) mtctr r4 mtxer r5 REST_GPR(0, r1) REST_GPRS(3, 12, r1) b 1b #ifdef CONFIG_44x 2: li r7,0 iccci r0,r0 stw r7,icache_44x_need_flush@l(r4) b 1b #endif /* CONFIG_44x */ .globl ret_from_fork ret_from_fork: REST_NVGPRS(r1) bl schedule_tail li r3,0 b ret_from_syscall .globl ret_from_kernel_thread ret_from_kernel_thread: REST_NVGPRS(r1) bl schedule_tail mtctr r14 mr r3,r15 PPC440EP_ERR42 bctrl li r3,0 b ret_from_syscall /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. Then the state * of the other is restored from its kernel stack. The memory * management hardware is updated to the second process's state. * Finally, we can return to the second process. * On entry, r3 points to the THREAD for the current task, r4 * points to the THREAD for the new task. * * This routine is always called with interrupts disabled. * * Note: there are two ways to get to the "going out" portion * of this code; either by coming in via the entry (_switch) * or via "fork" which must set up an environment equivalent * to the "_switch" path. If you change this , you'll have to * change the fork code also. * * The code which creates the new task context is in 'copy_thread' * in arch/ppc/kernel/process.c */ _GLOBAL(_switch) stwu r1,-INT_FRAME_SIZE(r1) mflr r0 stw r0,INT_FRAME_SIZE+4(r1) /* r3-r12 are caller saved -- Cort */ SAVE_NVGPRS(r1) stw r0,_NIP(r1) /* Return to switch caller */ mfcr r10 stw r10,_CCR(r1) stw r1,KSP(r3) /* Set old stack pointer */ #ifdef CONFIG_SMP /* We need a sync somewhere here to make sure that if the * previous task gets rescheduled on another CPU, it sees all * stores it has performed on this one. */ sync #endif /* CONFIG_SMP */ tophys(r0,r4) mtspr SPRN_SPRG_THREAD,r0 /* Update current THREAD phys addr */ lwz r1,KSP(r4) /* Load new stack pointer */ /* save the old current 'last' for return value */ mr r3,r2 addi r2,r4,-THREAD /* Update current */ lwz r0,_CCR(r1) mtcrf 0xFF,r0 /* r3-r12 are destroyed -- Cort */ REST_NVGPRS(r1) lwz r4,_NIP(r1) /* Return to _switch caller in new task */ mtlr r4 addi r1,r1,INT_FRAME_SIZE blr .globl fast_exception_return fast_exception_return: #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) andi. 
r10,r9,MSR_RI /* check for recoverable interrupt */ beq 3f /* if not, we've got problems */ #endif 2: lwz r10,_CCR(r11) REST_GPRS(1, 6, r11) mtcr r10 lwz r10,_LINK(r11) mtlr r10 /* Clear the exception marker on the stack to avoid confusing stacktrace */ li r10, 0 stw r10, 8(r11) REST_GPR(10, r11) #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) mtspr SPRN_NRI, r0 #endif mtspr SPRN_SRR1,r9 mtspr SPRN_SRR0,r12 REST_GPR(9, r11) REST_GPR(12, r11) REST_GPR(11, r11) rfi #ifdef CONFIG_40x b . /* Prevent prefetch past rfi */ #endif _ASM_NOKPROBE_SYMBOL(fast_exception_return) /* aargh, a nonrecoverable interrupt, panic */ /* aargh, we don't know which trap this is */ 3: li r10,-1 stw r10,_TRAP(r11) prepare_transfer_to_handler bl unrecoverable_exception trap /* should not get here */ .globl interrupt_return interrupt_return: lwz r4,_MSR(r1) addi r3,r1,STACK_FRAME_OVERHEAD andi. r0,r4,MSR_PR beq .Lkernel_interrupt_return bl interrupt_exit_user_prepare cmpwi r3,0 kuep_unlock bne- .Lrestore_nvgprs .Lfast_user_interrupt_return: lwz r11,_NIP(r1) lwz r12,_MSR(r1) mtspr SPRN_SRR0,r11 mtspr SPRN_SRR1,r12 BEGIN_FTR_SECTION stwcx. r0,0,r1 /* to clear the reservation */ FTR_SECTION_ELSE lwarx r0,0,r1 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) lwz r3,_CCR(r1) lwz r4,_LINK(r1) lwz r5,_CTR(r1) lwz r6,_XER(r1) li r0,0 /* * Leaving a stale exception marker on the stack can confuse * the reliable stack unwinder later on. Clear it. */ stw r0,8(r1) REST_GPRS(7, 12, r1) mtcr r3 mtlr r4 mtctr r5 mtspr SPRN_XER,r6 REST_GPRS(2, 6, r1) REST_GPR(0, r1) REST_GPR(1, r1) rfi #ifdef CONFIG_40x b . /* Prevent prefetch past rfi */ #endif .Lrestore_nvgprs: REST_NVGPRS(r1) b .Lfast_user_interrupt_return .Lkernel_interrupt_return: bl interrupt_exit_kernel_prepare .Lfast_kernel_interrupt_return: cmpwi cr1,r3,0 lwz r11,_NIP(r1) lwz r12,_MSR(r1) mtspr SPRN_SRR0,r11 mtspr SPRN_SRR1,r12 BEGIN_FTR_SECTION stwcx. r0,0,r1 /* to clear the reservation */ FTR_SECTION_ELSE lwarx r0,0,r1 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) lwz r3,_LINK(r1) lwz r4,_CTR(r1) lwz r5,_XER(r1) lwz r6,_CCR(r1) li r0,0 REST_GPRS(7, 12, r1) mtlr r3 mtctr r4 mtspr SPRN_XER,r5 /* * Leaving a stale exception marker on the stack can confuse * the reliable stack unwinder later on. Clear it. */ stw r0,8(r1) REST_GPRS(2, 5, r1) bne- cr1,1f /* emulate stack store */ mtcr r6 REST_GPR(6, r1) REST_GPR(0, r1) REST_GPR(1, r1) rfi #ifdef CONFIG_40x b . /* Prevent prefetch past rfi */ #endif 1: /* * Emulate stack store with update. New r1 value was already calculated * and updated in our interrupt regs by emulate_loadstore, but we can't * store the previous value of r1 to the stack before re-loading our * registers from it, otherwise they could be clobbered. Use * SPRG Scratch0 as temporary storage to hold the store * data, as interrupts are disabled here so it won't be clobbered. */ mtcr r6 #ifdef CONFIG_BOOKE mtspr SPRN_SPRG_WSCRATCH0, r9 #else mtspr SPRN_SPRG_SCRATCH0, r9 #endif addi r9,r1,INT_FRAME_SIZE /* get original r1 */ REST_GPR(6, r1) REST_GPR(0, r1) REST_GPR(1, r1) stw r9,0(r1) /* perform store component of stwu */ #ifdef CONFIG_BOOKE mfspr r9, SPRN_SPRG_RSCRATCH0 #else mfspr r9, SPRN_SPRG_SCRATCH0 #endif rfi #ifdef CONFIG_40x b . /* Prevent prefetch past rfi */ #endif _ASM_NOKPROBE_SYMBOL(interrupt_return) #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) /* * Returning from a critical interrupt in user mode doesn't need * to be any different from a normal exception. 
For a critical * interrupt in the kernel, we just return (without checking for * preemption) since the interrupt may have happened at some crucial * place (e.g. inside the TLB miss handler), and because we will be * running with r1 pointing into critical_stack, not the current * process's kernel stack (and therefore current_thread_info() will * give the wrong answer). * We have to restore various SPRs that may have been in use at the * time of the critical interrupt. * */ #ifdef CONFIG_40x #define PPC_40x_TURN_OFF_MSR_DR \ /* avoid any possible TLB misses here by turning off MSR.DR, we \ * assume the instructions here are mapped by a pinned TLB entry */ \ li r10,MSR_IR; \ mtmsr r10; \ isync; \ tophys(r1, r1); #else #define PPC_40x_TURN_OFF_MSR_DR #endif #define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \ REST_NVGPRS(r1); \ lwz r3,_MSR(r1); \ andi. r3,r3,MSR_PR; \ bne interrupt_return; \ REST_GPR(0, r1); \ REST_GPRS(2, 8, r1); \ lwz r10,_XER(r1); \ lwz r11,_CTR(r1); \ mtspr SPRN_XER,r10; \ mtctr r11; \ stwcx. r0,0,r1; /* to clear the reservation */ \ lwz r11,_LINK(r1); \ mtlr r11; \ lwz r10,_CCR(r1); \ mtcrf 0xff,r10; \ PPC_40x_TURN_OFF_MSR_DR; \ lwz r9,_DEAR(r1); \ lwz r10,_ESR(r1); \ mtspr SPRN_DEAR,r9; \ mtspr SPRN_ESR,r10; \ lwz r11,_NIP(r1); \ lwz r12,_MSR(r1); \ mtspr exc_lvl_srr0,r11; \ mtspr exc_lvl_srr1,r12; \ REST_GPRS(9, 12, r1); \ REST_GPR(1, r1); \ exc_lvl_rfi; \ b .; /* prevent prefetch past exc_lvl_rfi */ #define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \ lwz r9,_##exc_lvl_srr0(r1); \ lwz r10,_##exc_lvl_srr1(r1); \ mtspr SPRN_##exc_lvl_srr0,r9; \ mtspr SPRN_##exc_lvl_srr1,r10; #if defined(CONFIG_PPC_E500) #ifdef CONFIG_PHYS_64BIT #define RESTORE_MAS7 \ lwz r11,MAS7(r1); \ mtspr SPRN_MAS7,r11; #else #define RESTORE_MAS7 #endif /* CONFIG_PHYS_64BIT */ #define RESTORE_MMU_REGS \ lwz r9,MAS0(r1); \ lwz r10,MAS1(r1); \ lwz r11,MAS2(r1); \ mtspr SPRN_MAS0,r9; \ lwz r9,MAS3(r1); \ mtspr SPRN_MAS1,r10; \ lwz r10,MAS6(r1); \ mtspr SPRN_MAS2,r11; \ mtspr SPRN_MAS3,r9; \ mtspr SPRN_MAS6,r10; \ RESTORE_MAS7; #elif defined(CONFIG_44x) #define RESTORE_MMU_REGS \ lwz r9,MMUCR(r1); \ mtspr SPRN_MMUCR,r9; #else #define RESTORE_MMU_REGS #endif #ifdef CONFIG_40x .globl ret_from_crit_exc ret_from_crit_exc: lis r9,crit_srr0@ha; lwz r9,crit_srr0@l(r9); lis r10,crit_srr1@ha; lwz r10,crit_srr1@l(r10); mtspr SPRN_SRR0,r9; mtspr SPRN_SRR1,r10; RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI) _ASM_NOKPROBE_SYMBOL(ret_from_crit_exc) #endif /* CONFIG_40x */ #ifdef CONFIG_BOOKE .globl ret_from_crit_exc ret_from_crit_exc: RESTORE_xSRR(SRR0,SRR1); RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI) _ASM_NOKPROBE_SYMBOL(ret_from_crit_exc) .globl ret_from_debug_exc ret_from_debug_exc: RESTORE_xSRR(SRR0,SRR1); RESTORE_xSRR(CSRR0,CSRR1); RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI) _ASM_NOKPROBE_SYMBOL(ret_from_debug_exc) .globl ret_from_mcheck_exc ret_from_mcheck_exc: RESTORE_xSRR(SRR0,SRR1); RESTORE_xSRR(CSRR0,CSRR1); RESTORE_xSRR(DSRR0,DSRR1); RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI) _ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc) #endif /* CONFIG_BOOKE */ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
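Both the 32-bit path above and the 64-bit path later in this dump share the "emulate stack store" trick: emulate_loadstore has already written the post-stwu r1 into the saved regs, so the old r1 has to be parked somewhere the register restore cannot clobber and stored through the new stack pointer as the very last memory access. A C rendering of that ordering constraint (names and the frame size are illustrative, not the kernel's):

#include <stdint.h>

#define INT_FRAME_SIZE 192          /* illustrative; generated in asm-offsets */

static uint32_t sprg_scratch;       /* stands in for SPRN_SPRG_SCRATCH0 */

static void finish_emulated_stwu(uint32_t frame_base, uint32_t *new_r1)
{
	sprg_scratch = frame_base + INT_FRAME_SIZE;  /* recompute original r1 */
	/* ...REST_GPR(6)/REST_GPR(0)/REST_GPR(1) consume the frame here... */
	*new_r1 = sprg_scratch;                      /* store half of the stwu */
}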
aixcc-public/challenge-001-exemplar-source
1,934
arch/powerpc/kernel/idle_85xx.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. * Dave Liu <daveliu@freescale.com> * Copied from idle_6xx.S and modified for e500-based processors; * implements the power_save function used by idle. */ #include <linux/threads.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/feature-fixups.h> .text _GLOBAL(e500_idle) lwz r4,TI_LOCAL_FLAGS(r2) /* set napping bit */ ori r4,r4,_TLF_NAPPING /* so when we take an exception */ stw r4,TI_LOCAL_FLAGS(r2) /* it will return to our caller */ #ifdef CONFIG_PPC_E500MC wrteei 1 1: wait /* * Guard against spurious wakeups (e.g. from a hypervisor) -- * any real interrupt will cause us to return to LR due to * _TLF_NAPPING. */ b 1b #else /* Check if we can nap or doze, put HID0 mask in r3 */ lis r3,0 BEGIN_FTR_SECTION lis r3,HID0_DOZE@h END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) BEGIN_FTR_SECTION /* Now check if user enabled NAP mode */ lis r4,powersave_nap@ha lwz r4,powersave_nap@l(r4) cmpwi 0,r4,0 beq 1f stwu r1,-16(r1) mflr r0 stw r0,20(r1) bl flush_dcache_L1 lwz r0,20(r1) addi r1,r1,16 mtlr r0 lis r3,HID0_NAP@h END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) 1: /* Go to NAP or DOZE now */ mfspr r4,SPRN_HID0 rlwinm r4,r4,0,~(HID0_DOZE|HID0_NAP|HID0_SLEEP) or r4,r4,r3 isync mtspr SPRN_HID0,r4 isync mfmsr r7 oris r7,r7,MSR_WE@h ori r7,r7,MSR_EE msync mtmsr r7 isync 2: b 2b #endif /* !E500MC */ /* * Return from NAP/DOZE mode, restore some CPU specific registers, * r2 containing address of current. * r11 points to the exception frame. * We have to preserve r10. */ _GLOBAL(power_save_ppc32_restore) lwz r9,_LINK(r11) /* interrupted in e500_idle */ stw r9,_NIP(r11) /* make it do a blr */ blr _ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
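On pre-E500MC parts the idle loop above picks the deepest state the CPU supports before setting MSR[WE]: DOZE when available, upgraded to NAP when the user enabled powersave_nap (after an L1 flush). The HID0 edit is a plain read-modify-write; a small C sketch follows (bit positions as commonly defined in the kernel's reg.h, hedged here as illustrative):

#define HID0_DOZE  (1u << 23)
#define HID0_NAP   (1u << 22)
#define HID0_SLEEP (1u << 21)

static unsigned int pick_idle_hid0(unsigned int hid0, int can_doze,
				   int can_nap, int powersave_nap)
{
	unsigned int mode = 0;

	if (can_doze)
		mode = HID0_DOZE;
	if (can_nap && powersave_nap)
		mode = HID0_NAP;    /* the asm flushes the L1 dcache first */

	/* "rlwinm ...,~(HID0_DOZE|HID0_NAP|HID0_SLEEP)" then "or r4,r4,r3" */
	return (hid0 & ~(HID0_DOZE | HID0_NAP | HID0_SLEEP)) | mode;
}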
aixcc-public/challenge-001-exemplar-source
17,689
arch/powerpc/kernel/interrupt_64.S
#include <asm/asm-offsets.h> #include <asm/bug.h> #ifdef CONFIG_PPC_BOOK3S #include <asm/exception-64s.h> #else #include <asm/exception-64e.h> #endif #include <asm/feature-fixups.h> #include <asm/head-64.h> #include <asm/hw_irq.h> #include <asm/kup.h> #include <asm/mmu.h> #include <asm/ppc_asm.h> #include <asm/ptrace.h> .align 7 .macro DEBUG_SRR_VALID srr #ifdef CONFIG_PPC_RFI_SRR_DEBUG .ifc \srr,srr mfspr r11,SPRN_SRR0 ld r12,_NIP(r1) clrrdi r11,r11,2 clrrdi r12,r12,2 100: tdne r11,r12 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) mfspr r11,SPRN_SRR1 ld r12,_MSR(r1) 100: tdne r11,r12 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) .else mfspr r11,SPRN_HSRR0 ld r12,_NIP(r1) clrrdi r11,r11,2 clrrdi r12,r12,2 100: tdne r11,r12 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) mfspr r11,SPRN_HSRR1 ld r12,_MSR(r1) 100: tdne r11,r12 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) .endif #endif .endm #ifdef CONFIG_PPC_BOOK3S .macro system_call_vectored name trapnr .globl system_call_vectored_\name system_call_vectored_\name: _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) SCV_INTERRUPT_TO_KERNEL mr r10,r1 ld r1,PACAKSAVE(r13) std r10,0(r1) std r11,_NIP(r1) std r12,_MSR(r1) std r0,GPR0(r1) std r10,GPR1(r1) std r2,GPR2(r1) LOAD_PACA_TOC() mfcr r12 li r11,0 /* Save syscall parameters in r3-r8 */ SAVE_GPRS(3, 8, r1) /* Zero r9-r12, this should only be required when restoring all GPRs */ std r11,GPR9(r1) std r11,GPR10(r1) std r11,GPR11(r1) std r11,GPR12(r1) std r9,GPR13(r1) SAVE_NVGPRS(r1) std r11,_XER(r1) std r11,_LINK(r1) std r11,_CTR(r1) li r11,\trapnr std r11,_TRAP(r1) std r12,_CCR(r1) std r3,ORIG_GPR3(r1) /* Calling convention has r3 = regs, r4 = orig r0 */ addi r3,r1,STACK_FRAME_OVERHEAD mr r4,r0 LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) std r11,-16(r3) /* "regshere" marker */ BEGIN_FTR_SECTION HMT_MEDIUM END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* * scv enters with MSR[EE]=1 and is immediately considered soft-masked. * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED, * and interrupts may be masked and pending already. * system_call_exception() will call trace_hardirqs_off() which means * interrupts could already have been blocked before trace_hardirqs_off, * but this is the best we can do. */ bl system_call_exception .Lsyscall_vectored_\name\()_exit: addi r4,r1,STACK_FRAME_OVERHEAD li r5,1 /* scv */ bl syscall_exit_prepare std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ .Lsyscall_vectored_\name\()_rst_start: lbz r11,PACAIRQHAPPENED(r13) andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l bne- syscall_vectored_\name\()_restart li r11,IRQS_ENABLED stb r11,PACAIRQSOFTMASK(r13) li r11,0 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS ld r2,_CCR(r1) ld r4,_NIP(r1) ld r5,_MSR(r1) BEGIN_FTR_SECTION stdcx. r0,0,r1 /* to clear the reservation */ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) BEGIN_FTR_SECTION HMT_MEDIUM_LOW END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) cmpdi r3,0 bne .Lsyscall_vectored_\name\()_restore_regs /* rfscv returns with LR->NIA and CTR->MSR */ mtlr r4 mtctr r5 /* Could zero these as per ABI, but we may consider a stricter ABI * which preserves these if libc implementations can benefit, so * restore them for now until further measurement is done. */ REST_GPR(0, r1) REST_GPRS(4, 8, r1) /* Zero volatile regs that may contain sensitive kernel data */ ZEROIZE_GPRS(9, 12) mtspr SPRN_XER,r0 /* * We don't need to restore AMR on the way back to userspace for KUAP. 
* The value of AMR only matters while we're in the kernel. */ mtcr r2 REST_GPRS(2, 3, r1) REST_GPR(13, r1) REST_GPR(1, r1) RFSCV_TO_USER b . /* prevent speculative execution */ .Lsyscall_vectored_\name\()_restore_regs: mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r5 ld r3,_CTR(r1) ld r4,_LINK(r1) ld r5,_XER(r1) REST_NVGPRS(r1) REST_GPR(0, r1) mtcr r2 mtctr r3 mtlr r4 mtspr SPRN_XER,r5 REST_GPRS(2, 13, r1) REST_GPR(1, r1) RFI_TO_USER .Lsyscall_vectored_\name\()_rst_end: syscall_vectored_\name\()_restart: _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) LOAD_PACA_TOC() ld r3,RESULT(r1) addi r4,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) bl syscall_exit_restart std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ b .Lsyscall_vectored_\name\()_rst_start 1: SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b) RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart) .endm system_call_vectored common 0x3000 /* * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0 * which is tested by system_call_exception when r0 is -1 (as set by vector * entry code). */ system_call_vectored sigill 0x7ff0 #endif /* CONFIG_PPC_BOOK3S */ .balign IFETCH_ALIGN_BYTES .globl system_call_common_real system_call_common_real: _ASM_NOKPROBE_SYMBOL(system_call_common_real) ld r10,PACAKMSR(r13) /* get MSR value for kernel */ mtmsrd r10 .balign IFETCH_ALIGN_BYTES .globl system_call_common system_call_common: _ASM_NOKPROBE_SYMBOL(system_call_common) mr r10,r1 ld r1,PACAKSAVE(r13) std r10,0(r1) std r11,_NIP(r1) std r12,_MSR(r1) std r0,GPR0(r1) std r10,GPR1(r1) std r2,GPR2(r1) #ifdef CONFIG_PPC_E500 START_BTB_FLUSH_SECTION BTB_FLUSH(r10) END_BTB_FLUSH_SECTION #endif LOAD_PACA_TOC() mfcr r12 li r11,0 /* Save syscall parameters in r3-r8 */ SAVE_GPRS(3, 8, r1) /* Zero r9-r12, this should only be required when restoring all GPRs */ std r11,GPR9(r1) std r11,GPR10(r1) std r11,GPR11(r1) std r11,GPR12(r1) std r9,GPR13(r1) SAVE_NVGPRS(r1) std r11,_XER(r1) std r11,_CTR(r1) mflr r10 /* * This clears CR0.SO (bit 28), which is the error indication on * return from this system call. */ rldimi r12,r11,28,(63-28) li r11,0xc00 std r10,_LINK(r1) std r11,_TRAP(r1) std r12,_CCR(r1) std r3,ORIG_GPR3(r1) /* Calling convention has r3 = regs, r4 = orig r0 */ addi r3,r1,STACK_FRAME_OVERHEAD mr r4,r0 LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) std r11,-16(r3) /* "regshere" marker */ #ifdef CONFIG_PPC_BOOK3S li r11,1 stb r11,PACASRR_VALID(r13) #endif /* * We always enter kernel from userspace with irq soft-mask enabled and * nothing pending. system_call_exception() will call * trace_hardirqs_off(). */ li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) #ifdef CONFIG_PPC_BOOK3S li r12,-1 /* Set MSR_EE and MSR_RI */ mtmsrd r12,1 #else wrteei 1 #endif bl system_call_exception .Lsyscall_exit: addi r4,r1,STACK_FRAME_OVERHEAD li r5,0 /* !scv */ bl syscall_exit_prepare std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ #ifdef CONFIG_PPC_BOOK3S .Lsyscall_rst_start: lbz r11,PACAIRQHAPPENED(r13) andi. 
r11,r11,(~PACA_IRQ_HARD_DIS)@l bne- syscall_restart #endif li r11,IRQS_ENABLED stb r11,PACAIRQSOFTMASK(r13) li r11,0 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS ld r2,_CCR(r1) ld r6,_LINK(r1) mtlr r6 #ifdef CONFIG_PPC_BOOK3S lbz r4,PACASRR_VALID(r13) cmpdi r4,0 bne 1f li r4,0 stb r4,PACASRR_VALID(r13) #endif ld r4,_NIP(r1) ld r5,_MSR(r1) mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r5 1: DEBUG_SRR_VALID srr BEGIN_FTR_SECTION stdcx. r0,0,r1 /* to clear the reservation */ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) cmpdi r3,0 bne .Lsyscall_restore_regs /* Zero volatile regs that may contain sensitive kernel data */ ZEROIZE_GPR(0) ZEROIZE_GPRS(4, 12) mtctr r0 mtspr SPRN_XER,r0 .Lsyscall_restore_regs_cont: BEGIN_FTR_SECTION HMT_MEDIUM_LOW END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* * We don't need to restore AMR on the way back to userspace for KUAP. * The value of AMR only matters while we're in the kernel. */ mtcr r2 REST_GPRS(2, 3, r1) REST_GPR(13, r1) REST_GPR(1, r1) RFI_TO_USER b . /* prevent speculative execution */ .Lsyscall_restore_regs: ld r3,_CTR(r1) ld r4,_XER(r1) REST_NVGPRS(r1) mtctr r3 mtspr SPRN_XER,r4 REST_GPR(0, r1) REST_GPRS(4, 12, r1) b .Lsyscall_restore_regs_cont .Lsyscall_rst_end: #ifdef CONFIG_PPC_BOOK3S syscall_restart: _ASM_NOKPROBE_SYMBOL(syscall_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) LOAD_PACA_TOC() ld r3,RESULT(r1) addi r4,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) bl syscall_exit_restart std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ b .Lsyscall_rst_start 1: SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b) RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart) #endif /* * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not * touched, no exit work created, then this can be used. */ .balign IFETCH_ALIGN_BYTES .globl fast_interrupt_return_srr fast_interrupt_return_srr: _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr) kuap_check_amr r3, r4 ld r5,_MSR(r1) andi. r0,r5,MSR_PR #ifdef CONFIG_PPC_BOOK3S beq 1f kuap_user_restore r3, r4 b .Lfast_user_interrupt_return_srr 1: kuap_kernel_restore r3, r4 andi. r0,r5,MSR_RI li r3,0 /* 0 return value, no EMULATE_STACK_STORE */ bne+ .Lfast_kernel_interrupt_return_srr addi r3,r1,STACK_FRAME_OVERHEAD bl unrecoverable_exception b . /* should not get here */ #else bne .Lfast_user_interrupt_return_srr b .Lfast_kernel_interrupt_return_srr #endif .macro interrupt_return_macro srr .balign IFETCH_ALIGN_BYTES .globl interrupt_return_\srr interrupt_return_\srr\(): _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()) ld r4,_MSR(r1) andi. r0,r4,MSR_PR beq interrupt_return_\srr\()_kernel interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user) addi r3,r1,STACK_FRAME_OVERHEAD bl interrupt_exit_user_prepare cmpdi r3,0 bne- .Lrestore_nvgprs_\srr .Lrestore_nvgprs_\srr\()_cont: std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ #ifdef CONFIG_PPC_BOOK3S .Linterrupt_return_\srr\()_user_rst_start: lbz r11,PACAIRQHAPPENED(r13) andi. 
r11,r11,(~PACA_IRQ_HARD_DIS)@l bne- interrupt_return_\srr\()_user_restart #endif li r11,IRQS_ENABLED stb r11,PACAIRQSOFTMASK(r13) li r11,0 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS .Lfast_user_interrupt_return_\srr\(): #ifdef CONFIG_PPC_BOOK3S .ifc \srr,srr lbz r4,PACASRR_VALID(r13) .else lbz r4,PACAHSRR_VALID(r13) .endif cmpdi r4,0 li r4,0 bne 1f #endif ld r11,_NIP(r1) ld r12,_MSR(r1) .ifc \srr,srr mtspr SPRN_SRR0,r11 mtspr SPRN_SRR1,r12 1: #ifdef CONFIG_PPC_BOOK3S stb r4,PACASRR_VALID(r13) #endif .else mtspr SPRN_HSRR0,r11 mtspr SPRN_HSRR1,r12 1: #ifdef CONFIG_PPC_BOOK3S stb r4,PACAHSRR_VALID(r13) #endif .endif DEBUG_SRR_VALID \srr #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG lbz r4,PACAIRQSOFTMASK(r13) tdnei r4,IRQS_ENABLED #endif BEGIN_FTR_SECTION ld r10,_PPR(r1) mtspr SPRN_PPR,r10 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) BEGIN_FTR_SECTION stdcx. r0,0,r1 /* to clear the reservation */ FTR_SECTION_ELSE ldarx r0,0,r1 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) ld r3,_CCR(r1) ld r4,_LINK(r1) ld r5,_CTR(r1) ld r6,_XER(r1) li r0,0 REST_GPRS(7, 13, r1) mtcr r3 mtlr r4 mtctr r5 mtspr SPRN_XER,r6 REST_GPRS(2, 6, r1) REST_GPR(0, r1) REST_GPR(1, r1) .ifc \srr,srr RFI_TO_USER .else HRFI_TO_USER .endif b . /* prevent speculative execution */ .Linterrupt_return_\srr\()_user_rst_end: .Lrestore_nvgprs_\srr\(): REST_NVGPRS(r1) b .Lrestore_nvgprs_\srr\()_cont #ifdef CONFIG_PPC_BOOK3S interrupt_return_\srr\()_user_restart: _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) LOAD_PACA_TOC() addi r3,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) bl interrupt_exit_user_restart std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ b .Linterrupt_return_\srr\()_user_rst_start 1: SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b) RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart) #endif .balign IFETCH_ALIGN_BYTES interrupt_return_\srr\()_kernel: _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel) addi r3,r1,STACK_FRAME_OVERHEAD bl interrupt_exit_kernel_prepare std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ .Linterrupt_return_\srr\()_kernel_rst_start: ld r11,SOFTE(r1) cmpwi r11,IRQS_ENABLED stb r11,PACAIRQSOFTMASK(r13) beq .Linterrupt_return_\srr\()_soft_enabled /* * Returning to soft-disabled context. * Check if a MUST_HARD_MASK interrupt has become pending, in which * case we need to disable MSR[EE] in the return context. * * The MSR[EE] check catches among other things the short incoherency * in hard_irq_disable() between clearing MSR[EE] and setting * PACA_IRQ_HARD_DIS. */ ld r12,_MSR(r1) andi. r10,r12,MSR_EE beq .Lfast_kernel_interrupt_return_\srr\() // EE already disabled lbz r11,PACAIRQHAPPENED(r13) andi. r10,r11,PACA_IRQ_MUST_HARD_MASK bne 1f // HARD_MASK is pending // No HARD_MASK pending, clear possible HARD_DIS set by interrupt andi. 
r11,r11,(~PACA_IRQ_HARD_DIS)@l stb r11,PACAIRQHAPPENED(r13) b .Lfast_kernel_interrupt_return_\srr\() 1: /* Must clear MSR_EE from _MSR */ #ifdef CONFIG_PPC_BOOK3S li r10,0 /* Clear valid before changing _MSR */ .ifc \srr,srr stb r10,PACASRR_VALID(r13) .else stb r10,PACAHSRR_VALID(r13) .endif #endif xori r12,r12,MSR_EE std r12,_MSR(r1) b .Lfast_kernel_interrupt_return_\srr\() .Linterrupt_return_\srr\()_soft_enabled: /* * In the soft-enabled case, need to double-check that we have no * pending interrupts that might have come in before we reached the * restart section of code, and restart the exit so those can be * handled. * * If there are none, it is possible that the interrupt still * has PACA_IRQ_HARD_DIS set, which needs to be cleared for the * interrupted context. This clear will not clobber a new pending * interrupt coming in, because we're in the restart section, so * such would return to the restart location. */ #ifdef CONFIG_PPC_BOOK3S lbz r11,PACAIRQHAPPENED(r13) andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l bne- interrupt_return_\srr\()_kernel_restart #endif li r11,0 stb r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS .Lfast_kernel_interrupt_return_\srr\(): cmpdi cr1,r3,0 #ifdef CONFIG_PPC_BOOK3S .ifc \srr,srr lbz r4,PACASRR_VALID(r13) .else lbz r4,PACAHSRR_VALID(r13) .endif cmpdi r4,0 li r4,0 bne 1f #endif ld r11,_NIP(r1) ld r12,_MSR(r1) .ifc \srr,srr mtspr SPRN_SRR0,r11 mtspr SPRN_SRR1,r12 1: #ifdef CONFIG_PPC_BOOK3S stb r4,PACASRR_VALID(r13) #endif .else mtspr SPRN_HSRR0,r11 mtspr SPRN_HSRR1,r12 1: #ifdef CONFIG_PPC_BOOK3S stb r4,PACAHSRR_VALID(r13) #endif .endif DEBUG_SRR_VALID \srr BEGIN_FTR_SECTION stdcx. r0,0,r1 /* to clear the reservation */ FTR_SECTION_ELSE ldarx r0,0,r1 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) ld r3,_LINK(r1) ld r4,_CTR(r1) ld r5,_XER(r1) ld r6,_CCR(r1) li r0,0 REST_GPRS(7, 12, r1) mtlr r3 mtctr r4 mtspr SPRN_XER,r5 /* * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse * the reliable stack unwinder later on. Clear it. */ std r0,STACK_FRAME_OVERHEAD-16(r1) REST_GPRS(2, 5, r1) bne- cr1,1f /* emulate stack store */ mtcr r6 REST_GPR(6, r1) REST_GPR(0, r1) REST_GPR(1, r1) .ifc \srr,srr RFI_TO_KERNEL .else HRFI_TO_KERNEL .endif b . /* prevent speculative execution */ 1: /* * Emulate stack store with update. New r1 value was already calculated * and updated in our interrupt regs by emulate_loadstore, but we can't * store the previous value of r1 to the stack before re-loading our * registers from it, otherwise they could be clobbered. Use * PACA_EXGEN as temporary storage to hold the store data, as * interrupts are disabled here so it won't be clobbered. */ mtcr r6 std r9,PACA_EXGEN+0(r13) addi r9,r1,INT_FRAME_SIZE /* get original r1 */ REST_GPR(6, r1) REST_GPR(0, r1) REST_GPR(1, r1) std r9,0(r1) /* perform store component of stdu */ ld r9,PACA_EXGEN+0(r13) .ifc \srr,srr RFI_TO_KERNEL .else HRFI_TO_KERNEL .endif b .
/* prevent speculative execution */ .Linterrupt_return_\srr\()_kernel_rst_end: #ifdef CONFIG_PPC_BOOK3S interrupt_return_\srr\()_kernel_restart: _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) LOAD_PACA_TOC() addi r3,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) bl interrupt_exit_kernel_restart std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */ b .Linterrupt_return_\srr\()_kernel_rst_start 1: SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b) RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart) #endif .endm interrupt_return_macro srr #ifdef CONFIG_PPC_BOOK3S interrupt_return_macro hsrr .globl __end_soft_masked __end_soft_masked: DEFINE_FIXED_SYMBOL(__end_soft_masked, text) #endif /* CONFIG_PPC_BOOK3S */ #ifdef CONFIG_PPC_BOOK3S _GLOBAL(ret_from_fork_scv) bl schedule_tail REST_NVGPRS(r1) li r3,0 /* fork() return value */ b .Lsyscall_vectored_common_exit #endif _GLOBAL(ret_from_fork) bl schedule_tail REST_NVGPRS(r1) li r3,0 /* fork() return value */ b .Lsyscall_exit _GLOBAL(ret_from_kernel_thread) bl schedule_tail REST_NVGPRS(r1) mtctr r14 mr r3,r15 #ifdef CONFIG_PPC64_ELF_ABI_V2 mr r12,r14 #endif bctrl li r3,0 b .Lsyscall_exit
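Every restartable exit in the file above repeats one test: after soft-enabling, any PACAIRQHAPPENED bit other than PACA_IRQ_HARD_DIS means an interrupt landed inside the restart window and the exit must be replayed rather than completed. As a one-line C predicate (the flag value is an illustrative placeholder; the real one is in asm/hw_irq.h):

#define PACA_IRQ_HARD_DIS 0x01u   /* illustrative bit value */

static int must_restart_exit(unsigned int irq_happened)
{
	/* "andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l ; bne- ..._restart" */
	return (irq_happened & ~PACA_IRQ_HARD_DIS) != 0;
}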
aixcc-public/challenge-001-exemplar-source
20,578
arch/powerpc/kernel/head_40x.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> * Initial PowerPC version. * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> * Rewritten for PReP * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> * Low-level exception handlers, MMU support, and rewrite. * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> * PowerPC 8xx modifications. * Copyright (c) 1998-1999 TiVo, Inc. * PowerPC 403GCX modifications. * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> * PowerPC 403GCX/405GP modifications. * Copyright 2000 MontaVista Software Inc. * PPC405 modifications * PowerPC 403GCX/405GP modifications. * Author: MontaVista Software, Inc. * frank_rowand@mvista.com or source@mvista.com * debbie_chu@mvista.com * * Module name: head_4xx.S * * Description: * Kernel execution entry point code. */ #include <linux/init.h> #include <linux/pgtable.h> #include <linux/sizes.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/export.h> #include "head_32.h" /* As with the other PowerPC ports, it is expected that when code * execution begins here, the following registers contain valid, yet * optional, information: * * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) * r4 - Starting address of the init RAM disk * r5 - Ending address of the init RAM disk * r6 - Start of kernel command line string (e.g. "mem=96m") * r7 - End of kernel command line string * * This is all going to change RSN when we add bi_recs....... -- Dan */ __HEAD _GLOBAL(_stext); _GLOBAL(_start); mr r31,r3 /* save device tree ptr */ /* We have to turn on the MMU right away so we get cache modes * set correctly. */ bl initial_mmu /* We now have the lower 32 Meg mapped into TLB entries, and the caches * ready to work. */ turn_on_mmu: lis r0,MSR_KERNEL@h ori r0,r0,MSR_KERNEL@l mtspr SPRN_SRR1,r0 lis r0,start_here@h ori r0,r0,start_here@l mtspr SPRN_SRR0,r0 rfi /* enables MMU */ b . /* prevent prefetch past rfi */ /* * This area is used for temporarily saving registers during the * critical exception prolog. */ . = 0xc0 crit_save: _GLOBAL(crit_r10) .space 4 _GLOBAL(crit_r11) .space 4 _GLOBAL(crit_srr0) .space 4 _GLOBAL(crit_srr1) .space 4 _GLOBAL(crit_r1) .space 4 _GLOBAL(crit_dear) .space 4 _GLOBAL(crit_esr) .space 4 /* * Exception prolog for critical exceptions. This is a little different * from the normal exception prolog above since a critical exception * can potentially occur at any point during normal exception processing. * Thus we cannot use the same SPRG registers as the normal prolog above. * Instead we use a couple of words of memory at low physical addresses. * This is OK since we don't support SMP on these processors. */ .macro CRITICAL_EXCEPTION_PROLOG trapno name stw r10,crit_r10@l(0) /* save two registers to work with */ stw r11,crit_r11@l(0) mfspr r10,SPRN_SRR0 mfspr r11,SPRN_SRR1 stw r10,crit_srr0@l(0) stw r11,crit_srr1@l(0) mfspr r10,SPRN_DEAR mfspr r11,SPRN_ESR stw r10,crit_dear@l(0) stw r11,crit_esr@l(0) mfcr r10 /* save CR in r10 for now */ mfspr r11,SPRN_SRR3 /* check whether user or kernel */ andi.
r11,r11,MSR_PR lis r11,(critirq_ctx-PAGE_OFFSET)@ha lwz r11,(critirq_ctx-PAGE_OFFSET)@l(r11) beq 1f /* COMING FROM USER MODE */ mfspr r11,SPRN_SPRG_THREAD /* if from user, start at top of */ lwz r11,TASK_STACK-THREAD(r11) /* this thread's kernel stack */ 1: stw r1,crit_r1@l(0) addi r1,r11,THREAD_SIZE-INT_FRAME_SIZE /* Alloc an excpt frm */ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)) /* re-enable MMU */ mtspr SPRN_SRR1, r11 lis r11, 1f@h ori r11, r11, 1f@l mtspr SPRN_SRR0, r11 rfi .text 1: \name\()_virt: lwz r11,crit_r1@l(0) stw r11,GPR1(r1) stw r11,0(r1) mr r11,r1 stw r10,_CCR(r11) /* save various registers */ stw r12,GPR12(r11) stw r9,GPR9(r11) mflr r10 stw r10,_LINK(r11) lis r9,PAGE_OFFSET@ha lwz r10,crit_r10@l(r9) lwz r12,crit_r11@l(r9) stw r10,GPR10(r11) stw r12,GPR11(r11) lwz r12,crit_dear@l(r9) lwz r9,crit_esr@l(r9) stw r12,_DEAR(r11) /* since they may have had stuff */ stw r9,_ESR(r11) /* exception was taken */ mfspr r12,SPRN_SRR2 mfspr r9,SPRN_SRR3 rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */ COMMON_EXCEPTION_PROLOG_END \trapno + 2 _ASM_NOKPROBE_SYMBOL(\name\()_virt) .endm /* * State at this point: * r9 saved in stack frame, now saved SRR3 & ~MSR_WE * r10 saved in crit_r10 and in stack frame, trashed * r11 saved in crit_r11 and in stack frame, * now phys stack/exception frame pointer * r12 saved in stack frame, now saved SRR2 * CR saved in stack frame, CR0.EQ = !SRR3.PR * LR, DEAR, ESR in stack frame * r1 saved in stack frame, now virt stack/excframe pointer * r0, r3-r8 saved in stack frame */ /* * Exception vectors. */ #define CRITICAL_EXCEPTION(n, label, hdlr) \ START_EXCEPTION(n, label); \ CRITICAL_EXCEPTION_PROLOG n label; \ prepare_transfer_to_handler; \ bl hdlr; \ b ret_from_crit_exc /* * 0x0100 - Critical Interrupt Exception */ CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception) /* * 0x0200 - Machine Check Exception */ CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) /* * 0x0300 - Data Storage Exception * This happens for just a few reasons. U0 set (but we don't do that), * or zone protection fault (user violation, write to protected page). * The other Data TLB exceptions bail out to this point * if they can't resolve the lightweight TLB fault. */ START_EXCEPTION(0x0300, DataStorage) EXCEPTION_PROLOG 0x300 DataStorage handle_dar_dsisr=1 prepare_transfer_to_handler bl do_page_fault b interrupt_return /* * 0x0400 - Instruction Storage Exception * This is caused by a fetch from non-execute or guarded pages. 
*/ START_EXCEPTION(0x0400, InstructionAccess) EXCEPTION_PROLOG 0x400 InstructionAccess li r5,0 stw r5, _ESR(r11) /* Zero ESR */ stw r12, _DEAR(r11) /* SRR0 as DEAR */ prepare_transfer_to_handler bl do_page_fault b interrupt_return /* 0x0500 - External Interrupt Exception */ EXCEPTION(0x0500, HardwareInterrupt, do_IRQ) /* 0x0600 - Alignment Exception */ START_EXCEPTION(0x0600, Alignment) EXCEPTION_PROLOG 0x600 Alignment handle_dar_dsisr=1 prepare_transfer_to_handler bl alignment_exception REST_NVGPRS(r1) b interrupt_return /* 0x0700 - Program Exception */ START_EXCEPTION(0x0700, ProgramCheck) EXCEPTION_PROLOG 0x700 ProgramCheck handle_dar_dsisr=1 prepare_transfer_to_handler bl program_check_exception REST_NVGPRS(r1) b interrupt_return EXCEPTION(0x0800, Trap_08, unknown_exception) EXCEPTION(0x0900, Trap_09, unknown_exception) EXCEPTION(0x0A00, Trap_0A, unknown_exception) EXCEPTION(0x0B00, Trap_0B, unknown_exception) /* 0x0C00 - System Call Exception */ START_EXCEPTION(0x0C00, SystemCall) SYSCALL_ENTRY 0xc00 /* Trap_0D is commented out to get more space for system call exception */ /* EXCEPTION(0x0D00, Trap_0D, unknown_exception) */ EXCEPTION(0x0E00, Trap_0E, unknown_exception) EXCEPTION(0x0F00, Trap_0F, unknown_exception) /* 0x1000 - Programmable Interval Timer (PIT) Exception */ START_EXCEPTION(0x1000, DecrementerTrap) b Decrementer /* 0x1010 - Fixed Interval Timer (FIT) Exception */ START_EXCEPTION(0x1010, FITExceptionTrap) b FITException /* 0x1020 - Watchdog Timer (WDT) Exception */ START_EXCEPTION(0x1020, WDTExceptionTrap) b WDTException /* 0x1100 - Data TLB Miss Exception * As the name implies, translation is not in the MMU, so search the * page tables and fix it. The only purpose of this function is to * load TLB entries from the page table if they exist. */ START_EXCEPTION(0x1100, DTLBMiss) mtspr SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */ mtspr SPRN_SPRG_SCRATCH6, r11 mtspr SPRN_SPRG_SCRATCH3, r12 mtspr SPRN_SPRG_SCRATCH4, r9 mfcr r12 mfspr r9, SPRN_PID rlwimi r12, r9, 0, 0xff mfspr r10, SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. */ lis r11, PAGE_OFFSET@h cmplw r10, r11 blt+ 3f lis r11, swapper_pg_dir@h ori r11, r11, swapper_pg_dir@l li r9, 0 mtspr SPRN_PID, r9 /* TLB will have 0 TID */ b 4f /* Get the PGD for the current thread. */ 3: mfspr r11,SPRN_SPRG_THREAD lwz r11,PGDIR(r11) #ifdef CONFIG_PPC_KUAP rlwinm. r9, r9, 0, 0xff beq 5f /* Kuap fault */ #endif 4: tophys(r11, r11) rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ lwz r11, 0(r11) /* Get L1 entry */ andi. r9, r11, _PMD_PRESENT /* Check if it points to a PTE page */ beq 2f /* Bail if no table */ rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */ lwz r11, 0(r11) /* Get Linux PTE */ li r9, _PAGE_PRESENT | _PAGE_ACCESSED andc. r9, r9, r11 /* Check permission */ bne 5f rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */ and r9, r9, r11 /* hwwrite = dirty & rw */ rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */ /* Create TLB tag. This is the faulting address plus a static * set of bits. These are size, valid, E, U0. */ li r9, 0x00c0 rlwimi r10, r9, 0, 20, 31 b finish_tlb_load 2: /* Check for possible large-page pmd entry */ rlwinm. r9, r11, 2, 22, 24 beq 5f /* Create TLB tag. This is the faulting address, plus a static * set of bits (valid, E, U0) plus the size from the PMD. */ ori r9, r9, 0x40 rlwimi r10, r9, 0, 20, 31 b finish_tlb_load 5: /* The bailout. 
Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ mtspr SPRN_PID, r12 mtcrf 0x80, r12 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 mfspr r11, SPRN_SPRG_SCRATCH6 mfspr r10, SPRN_SPRG_SCRATCH5 b DataStorage /* 0x1200 - Instruction TLB Miss Exception * Nearly the same as above, except we get our information from different * registers and bailout to a different point. */ START_EXCEPTION(0x1200, ITLBMiss) mtspr SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */ mtspr SPRN_SPRG_SCRATCH6, r11 mtspr SPRN_SPRG_SCRATCH3, r12 mtspr SPRN_SPRG_SCRATCH4, r9 mfcr r12 mfspr r9, SPRN_PID rlwimi r12, r9, 0, 0xff mfspr r10, SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. */ lis r11, PAGE_OFFSET@h cmplw r10, r11 blt+ 3f lis r11, swapper_pg_dir@h ori r11, r11, swapper_pg_dir@l li r9, 0 mtspr SPRN_PID, r9 /* TLB will have 0 TID */ b 4f /* Get the PGD for the current thread. */ 3: mfspr r11,SPRN_SPRG_THREAD lwz r11,PGDIR(r11) #ifdef CONFIG_PPC_KUAP rlwinm. r9, r9, 0, 0xff beq 5f /* Kuap fault */ #endif 4: tophys(r11, r11) rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ lwz r11, 0(r11) /* Get L1 entry */ andi. r9, r11, _PMD_PRESENT /* Check if it points to a PTE page */ beq 2f /* Bail if no table */ rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */ lwz r11, 0(r11) /* Get Linux PTE */ li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC andc. r9, r9, r11 /* Check permission */ bne 5f rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */ and r9, r9, r11 /* hwwrite = dirty & rw */ rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */ /* Create TLB tag. This is the faulting address plus a static * set of bits. These are size, valid, E, U0. */ li r9, 0x00c0 rlwimi r10, r9, 0, 20, 31 b finish_tlb_load 2: /* Check for possible large-page pmd entry */ rlwinm. r9, r11, 2, 22, 24 beq 5f /* Create TLB tag. This is the faulting address, plus a static * set of bits (valid, E, U0) plus the size from the PMD. */ ori r9, r9, 0x40 rlwimi r10, r9, 0, 20, 31 b finish_tlb_load 5: /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ mtspr SPRN_PID, r12 mtcrf 0x80, r12 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 mfspr r11, SPRN_SPRG_SCRATCH6 mfspr r10, SPRN_SPRG_SCRATCH5 b InstructionAccess EXCEPTION(0x1300, Trap_13, unknown_exception) EXCEPTION(0x1400, Trap_14, unknown_exception) EXCEPTION(0x1500, Trap_15, unknown_exception) EXCEPTION(0x1600, Trap_16, unknown_exception) EXCEPTION(0x1700, Trap_17, unknown_exception) EXCEPTION(0x1800, Trap_18, unknown_exception) EXCEPTION(0x1900, Trap_19, unknown_exception) EXCEPTION(0x1A00, Trap_1A, unknown_exception) EXCEPTION(0x1B00, Trap_1B, unknown_exception) EXCEPTION(0x1C00, Trap_1C, unknown_exception) EXCEPTION(0x1D00, Trap_1D, unknown_exception) EXCEPTION(0x1E00, Trap_1E, unknown_exception) EXCEPTION(0x1F00, Trap_1F, unknown_exception) /* Check for a single step debug exception while in an exception * handler before state has been saved. This is to catch the case * where an instruction that we are trying to single step causes * an exception (eg ITLB/DTLB miss) and thus the first instruction of * the exception handler generates a single step debug exception. * * If we get a debug trap on the first instruction of an exception handler, * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). 
* The exception handler was handling a non-critical interrupt, so it will * save (and later restore) the MSR via SPRN_SRR1, which will still have * the MSR_DE bit set. */ /* 0x2000 - Debug Exception */ START_EXCEPTION(0x2000, DebugTrap) CRITICAL_EXCEPTION_PROLOG 0x2000 DebugTrap /* * If this is a single step or branch-taken exception in an * exception entry sequence, it was probably meant to apply to * the code where the exception occurred (since exception entry * doesn't turn off DE automatically). We simulate the effect * of turning off DE on entry to an exception handler by turning * off DE in the SRR3 value and clearing the debug status. */ mfspr r10,SPRN_DBSR /* check single-step/branch taken */ andis. r10,r10,DBSR_IC@h beq+ 2f andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */ beq 1f /* branch and fix it up */ mfspr r10,SPRN_SRR2 /* Faulting instruction address */ cmplwi r10,0x2100 bgt+ 2f /* address above exception vectors */ /* here it looks like we got an inappropriate debug exception. */ 1: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */ lis r10,DBSR_IC@h /* clear the IC event */ mtspr SPRN_DBSR,r10 /* restore state and get out */ lwz r10,_CCR(r11) lwz r0,GPR0(r11) lwz r1,GPR1(r11) mtcrf 0x80,r10 mtspr SPRN_SRR2,r12 mtspr SPRN_SRR3,r9 lwz r9,GPR9(r11) lwz r12,GPR12(r11) lwz r10,crit_r10@l(0) lwz r11,crit_r11@l(0) rfci b . /* continue normal handling for a critical exception... */ 2: mfspr r4,SPRN_DBSR stw r4,_ESR(r11) /* DebugException takes DBSR in _ESR */ prepare_transfer_to_handler bl DebugException b ret_from_crit_exc /* Programmable Interval Timer (PIT) Exception. (from 0x1000) */ __HEAD Decrementer: EXCEPTION_PROLOG 0x1000 Decrementer lis r0,TSR_PIS@h mtspr SPRN_TSR,r0 /* Clear the PIT exception */ prepare_transfer_to_handler bl timer_interrupt b interrupt_return /* Fixed Interval Timer (FIT) Exception. (from 0x1010) */ __HEAD FITException: EXCEPTION_PROLOG 0x1010 FITException prepare_transfer_to_handler bl unknown_exception b interrupt_return /* Watchdog Timer (WDT) Exception. (from 0x1020) */ __HEAD WDTException: CRITICAL_EXCEPTION_PROLOG 0x1020 WDTException prepare_transfer_to_handler bl WatchdogException b ret_from_crit_exc /* Other PowerPC processors, namely those derived from the 6xx-series * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. * However, for the 4xx-series processors these are neither defined nor * reserved. */ __HEAD /* Damn, I came up one instruction too many to fit into the * exception space :-). Both the instruction and data TLB * miss get to this point to load the TLB. * r10 - TLB_TAG value * r11 - Linux PTE * r9 - available to use * PID - loaded with proper value when we get here * Upon exit, we reload everything and RFI. * Actually, it will fit now, but oh well.....a common place * to load the TLB. */ tlb_4xx_index: .long 0 finish_tlb_load: /* * Clear out the software-only bits in the PTE to generate the * TLB_DATA value. These are the bottom 2 bits of the RPM, the * top 3 bits of the zone field, and M. */ li r9, 0x0ce2 andc r11, r11, r9 /* load the next available TLB index. */ lwz r9, tlb_4xx_index@l(0) addi r9, r9, 1 andi. r9, r9, PPC40X_TLB_SIZE - 1 stw r9, tlb_4xx_index@l(0) tlbwe r11, r9, TLB_DATA /* Load TLB LO */ tlbwe r10, r9, TLB_TAG /* Load TLB HI */ /* Done...restore registers and get out of here. */ mtspr SPRN_PID, r12 mtcrf 0x80, r12 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 mfspr r11, SPRN_SPRG_SCRATCH6 mfspr r10, SPRN_SPRG_SCRATCH5 rfi /* Should sync shadow TLBs */ b . 
/* prevent prefetch past rfi */ /* This is where the main kernel code starts. */ start_here: /* ptr to current */ lis r2,init_task@h ori r2,r2,init_task@l /* ptr to phys current thread */ tophys(r4,r2) addi r4,r4,THREAD /* init task's THREAD */ mtspr SPRN_SPRG_THREAD,r4 /* stack */ lis r1,init_thread_union@ha addi r1,r1,init_thread_union@l li r0,0 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) bl early_init /* We have to do this with MMU on */ /* * Decide what sort of machine this is and initialize the MMU. */ #ifdef CONFIG_KASAN bl kasan_early_init #endif li r3,0 mr r4,r31 bl machine_init bl MMU_init /* Go back to running unmapped so we can load up new values * and change to using our exception vectors. * On the 4xx, all we have to do is invalidate the TLB to clear * the old 16M byte TLB mappings. */ lis r4,2f@h ori r4,r4,2f@l tophys(r4,r4) lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 rfi b . /* prevent prefetch past rfi */ /* Load up the kernel context */ 2: sync /* Flush to memory before changing TLB */ tlbia isync /* Flush shadow TLBs */ /* set up the PTE pointers for the Abatron bdiGDB. */ lis r6, swapper_pg_dir@h ori r6, r6, swapper_pg_dir@l lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l stw r5, 0xf0(0) /* Must match your Abatron config file */ tophys(r5,r5) stw r6, 0(r5) /* Now turn on the MMU for real! */ lis r4,MSR_KERNEL@h ori r4,r4,MSR_KERNEL@l lis r3,start_kernel@h ori r3,r3,start_kernel@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 rfi /* enable MMU and jump to start_kernel */ b . /* prevent prefetch past rfi */ /* Set up the initial MMU state so we can do the first level of * kernel initialization. This maps the first 32 MBytes of memory 1:1 * virtual to physical and more importantly sets the cache mode. */ initial_mmu: tlbia /* Invalidate all TLB entries */ isync /* We should still be executing code at physical address 0x0000xxxx * at this point. However, start_here is at virtual address * 0xC000xxxx. So, set up a TLB mapping to cover this once * translation is enabled. */ lis r3,KERNELBASE@h /* Load the kernel virtual address */ ori r3,r3,KERNELBASE@l tophys(r4,r3) /* Load the kernel physical address */ iccci r0,r3 /* Invalidate the i-cache before use */ /* Load the kernel PID. */ li r0,0 mtspr SPRN_PID,r0 sync /* Configure and load one entry into TLB slots 63 */ clrrwi r4,r4,10 /* Mask off the real page number */ ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ clrrwi r3,r3,10 /* Mask off the effective page number */ ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) li r0,63 /* TLB slot 63 */ tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ li r0,62 /* TLB slot 62 */ addis r4,r4,SZ_16M@h addis r3,r3,SZ_16M@h tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ isync /* Establish the exception vector base */ lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */ tophys(r0,r4) /* Use the physical address */ mtspr SPRN_EVPR,r0 blr _GLOBAL(abort) mfspr r13,SPRN_DBCR0 oris r13,r13,DBCR0_RST_SYSTEM@h mtspr SPRN_DBCR0,r13
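For reference, the two-level walk performed by the DTLBMiss/ITLBMiss handlers above can be sketched in C roughly as follows. This is illustrative only: the bit values, the pgdir indexing granularity, and the tlb_write() helper are assumptions made for exposition, not kernel API; the real handlers run entirely in assembly with translation disabled.

/*
 * Rough C model of the 40x software TLB reload above.
 * Masks and helpers are illustrative assumptions only.
 */
#define _PMD_PRESENT	0x001		/* assumed L1-entry valid bit */
#define _PAGE_PRESENT	0x001		/* assumed Linux PTE bits     */
#define _PAGE_ACCESSED	0x020
#define _PAGE_RW	0x040
#define _PAGE_DIRTY	0x080

extern void tlb_write(unsigned long tag, unsigned long data); /* hypothetical */

static int reload_tlb(unsigned long ea, const unsigned long *pgdir)
{
	unsigned long l1, *pte_page, pte, tag;

	l1 = pgdir[ea >> 22];			/* L1 (pgdir/pmd) entry  */
	if (!(l1 & _PMD_PRESENT))
		return -1;			/* bail to DataStorage   */

	pte_page = (unsigned long *)(l1 & ~0xfffUL);
	pte = pte_page[(ea >> 12) & 0x3ff];	/* Linux PTE             */

	/* both PRESENT and ACCESSED must already be set */
	if ((pte & (_PAGE_PRESENT | _PAGE_ACCESSED)) !=
	    (_PAGE_PRESENT | _PAGE_ACCESSED))
		return -1;

	/* hwwrite = dirty & rw: only dirty pages stay writable */
	if (!((pte & _PAGE_DIRTY) && (pte & _PAGE_RW)))
		pte &= ~_PAGE_RW;

	tag = (ea & ~0xfffUL) | 0xc0;		/* EPN + size/valid bits */
	tlb_write(tag, pte);
	return 0;
}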
aixcc-public/challenge-001-exemplar-source
22,551
arch/powerpc/kernel/head_8xx.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> * Low-level exception handlers and MMU support * rewritten by Paul Mackerras. * Copyright (C) 1996 Paul Mackerras. * MPC8xx modifications by Dan Malek * Copyright (C) 1997 Dan Malek (dmalek@jlc.net). * * This file contains low-level support and setup for PowerPC 8xx * embedded processors, including trap and interrupt dispatch. */ #include <linux/init.h> #include <linux/magic.h> #include <linux/pgtable.h> #include <linux/sizes.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/cache.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/export.h> #include <asm/code-patching-asm.h> #include <asm/interrupt.h> /* * Value for the bits that have fixed value in RPN entries. * Also used for tagging DAR for DTLBerror. */ #define RPN_PATTERN 0x00f0 #include "head_32.h" .macro compare_to_kernel_boundary scratch, addr #if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000 /* By simply checking Address >= 0x80000000, we know if its a kernel address */ not. \scratch, \addr #else rlwinm \scratch, \addr, 16, 0xfff8 cmpli cr0, \scratch, PAGE_OFFSET@h #endif .endm #define PAGE_SHIFT_512K 19 #define PAGE_SHIFT_8M 23 __HEAD _GLOBAL(_stext); _GLOBAL(_start); /* MPC8xx * This port was done on an MBX board with an 860. Right now I only * support an ELF compressed (zImage) boot from EPPC-Bug because the * code there loads up some registers before calling us: * r3: ptr to board info data * r4: initrd_start or if no initrd then 0 * r5: initrd_end - unused if r4 is 0 * r6: Start of command line string * r7: End of command line string * * I decided to use conditional compilation instead of checking PVR and * adding more processor specific branches around code I don't need. * Since this is an embedded processor, I also appreciate any memory * savings I can get. * * The MPC8xx does not have any BATs, but it supports large page sizes. * We first initialize the MMU to support 8M byte pages, then load one * entry into each of the instruction and data TLBs to map the first * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to * the "internal" processor registers before MMU_init is called. * * -- Dan */ .globl __start __start: mr r31,r3 /* save device tree ptr */ /* We have to turn on the MMU right away so we get cache modes * set correctly. */ bl initial_mmu /* We now have the lower 8 Meg mapped into TLB entries, and the caches * ready to work. 
*/ turn_on_mmu: mfmsr r0 ori r0,r0,MSR_DR|MSR_IR mtspr SPRN_SRR1,r0 lis r0,start_here@h ori r0,r0,start_here@l mtspr SPRN_SRR0,r0 rfi /* enables MMU */ #ifdef CONFIG_PERF_EVENTS .align 4 .globl itlb_miss_counter itlb_miss_counter: .space 4 .globl dtlb_miss_counter dtlb_miss_counter: .space 4 .globl instruction_counter instruction_counter: .space 4 #endif /* System reset */ EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, system_reset_exception) /* Machine check */ START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck) EXCEPTION_PROLOG INTERRUPT_MACHINE_CHECK MachineCheck handle_dar_dsisr=1 prepare_transfer_to_handler bl machine_check_exception b interrupt_return /* External interrupt */ EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ) /* Alignment exception */ START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment) EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1 prepare_transfer_to_handler bl alignment_exception REST_NVGPRS(r1) b interrupt_return /* Program check exception */ START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck) EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck prepare_transfer_to_handler bl program_check_exception REST_NVGPRS(r1) b interrupt_return /* Decrementer */ EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt) /* System call */ START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall) SYSCALL_ENTRY INTERRUPT_SYSCALL /* Single step - not used on 601 */ EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception) /* On the MPC8xx, this is a software emulation interrupt. It occurs * for all unimplemented and illegal instructions. */ START_EXCEPTION(INTERRUPT_SOFT_EMU_8xx, SoftEmu) EXCEPTION_PROLOG INTERRUPT_SOFT_EMU_8xx SoftEmu prepare_transfer_to_handler bl emulation_assist_interrupt REST_NVGPRS(r1) b interrupt_return /* * For the MPC8xx, this is a software tablewalk to load the instruction * TLB. The task switch loads the M_TWB register with the pointer to the first * level table. * If we discover there is no second level table (value is zero) or if there * is an invalid pte, we load that into the TLB, which causes another fault * into the TLB Error interrupt where we can handle such problems. * We have to use the MD_xxx registers for the tablewalk because the * equivalent MI_xxx registers only perform the attribute functions. */ #ifdef CONFIG_8xx_CPU15 #define INVALIDATE_ADJACENT_PAGES_CPU15(addr, tmp) \ addi tmp, addr, PAGE_SIZE; \ tlbie tmp; \ addi tmp, addr, -PAGE_SIZE; \ tlbie tmp #else #define INVALIDATE_ADJACENT_PAGES_CPU15(addr, tmp) #endif START_EXCEPTION(INTERRUPT_INST_TLB_MISS_8xx, InstructionTLBMiss) mtspr SPRN_SPRG_SCRATCH2, r10 mtspr SPRN_M_TW, r11 /* If we are faulting a kernel address, we have to use the * kernel page tables. */ mfspr r10, SPRN_SRR0 /* Get effective address of fault */ INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11) mtspr SPRN_MD_EPN, r10 #ifdef CONFIG_MODULES mfcr r11 compare_to_kernel_boundary r10, r10 #endif mfspr r10, SPRN_M_TWB /* Get level 1 table */ #ifdef CONFIG_MODULES blt+ 3f rlwinm r10, r10, 0, 20, 31 oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha 3: mtcr r11 #endif lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ mtspr SPRN_MD_TWC, r11 mfspr r10, SPRN_MD_TWC lwz r10, 0(r10) /* Get the pte */ rlwimi r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K mtspr SPRN_MI_TWC, r11 /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 20 and 23 must be clear. * Software indicator bits 22, 24, 25, 26, and 27 must be * set. 
All other Linux PTE bits control the behavior * of the MMU. */ rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */ rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */ ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ /* Restore registers */ 0: mfspr r10, SPRN_SPRG_SCRATCH2 mfspr r11, SPRN_M_TW rfi patch_site 0b, patch__itlbmiss_exit_1 #ifdef CONFIG_PERF_EVENTS patch_site 0f, patch__itlbmiss_perf 0: lwz r10, (itlb_miss_counter - PAGE_OFFSET)@l(0) addi r10, r10, 1 stw r10, (itlb_miss_counter - PAGE_OFFSET)@l(0) mfspr r10, SPRN_SPRG_SCRATCH2 mfspr r11, SPRN_M_TW rfi #endif START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss) mtspr SPRN_SPRG_SCRATCH2, r10 mtspr SPRN_M_TW, r11 mfcr r11 /* If we are faulting a kernel address, we have to use the * kernel page tables. */ mfspr r10, SPRN_MD_EPN compare_to_kernel_boundary r10, r10 mfspr r10, SPRN_M_TWB /* Get level 1 table */ blt+ 3f rlwinm r10, r10, 0, 20, 31 oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha 3: mtcr r11 lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ mtspr SPRN_MD_TWC, r11 mfspr r10, SPRN_MD_TWC lwz r10, 0(r10) /* Get the pte */ /* Insert Guarded and Accessed flags into the TWC from the Linux PTE. * It is bit 27 of both the Linux PTE and the TWC (at least * I got that right :-). It will be better when we can put * this into the Linux pgd/pmd and load it in the operation * above. */ rlwimi r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K mtspr SPRN_MD_TWC, r11 /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 24, 25, 26, and 27 must be * set. All other Linux PTE bits control the behavior * of the MMU. */ li r11, RPN_PATTERN rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */ mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ mtspr SPRN_DAR, r11 /* Tag DAR */ /* Restore registers */ 0: mfspr r10, SPRN_SPRG_SCRATCH2 mfspr r11, SPRN_M_TW rfi patch_site 0b, patch__dtlbmiss_exit_1 #ifdef CONFIG_PERF_EVENTS patch_site 0f, patch__dtlbmiss_perf 0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) addi r10, r10, 1 stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) mfspr r10, SPRN_SPRG_SCRATCH2 mfspr r11, SPRN_M_TW rfi #endif /* This is an instruction TLB error on the MPC8xx. This could be due * to many reasons, such as executing guarded memory or illegal instruction * addresses. There is nothing to do but handle a big time error fault. */ START_EXCEPTION(INTERRUPT_INST_TLB_ERROR_8xx, InstructionTLBError) /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ EXCEPTION_PROLOG INTERRUPT_INST_STORAGE InstructionTLBError andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ andis. r10,r9,SRR1_ISI_NOPT@h beq+ .Litlbie tlbie r12 .Litlbie: stw r12, _DAR(r11) stw r5, _DSISR(r11) prepare_transfer_to_handler bl do_page_fault b interrupt_return /* This is the data TLB error on the MPC8xx. This could be due to * many reasons, including a dirty update to a pte. We bail out to * a higher level function that can handle it. */ START_EXCEPTION(INTERRUPT_DATA_TLB_ERROR_8xx, DataTLBError) EXCEPTION_PROLOG_0 handle_dar_dsisr=1 mfspr r11, SPRN_DAR cmpwi cr1, r11, RPN_PATTERN beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */ DARFixed:/* Return from dcbx instruction bug workaround */ EXCEPTION_PROLOG_1 /* 0x300 is DataAccess exception, needed by bad_page_fault() */ EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1 lwz r4, _DAR(r11) lwz r5, _DSISR(r11) andis. 
r10,r5,DSISR_NOHPTE@h beq+ .Ldtlbie tlbie r4 .Ldtlbie: prepare_transfer_to_handler bl do_page_fault b interrupt_return #ifdef CONFIG_VMAP_STACK vmap_stack_overflow_exception #endif /* On the MPC8xx, these next four traps are used for development * support of breakpoints and such. Someday I will get around to * using them. */ START_EXCEPTION(INTERRUPT_DATA_BREAKPOINT_8xx, DataBreakpoint) EXCEPTION_PROLOG_0 handle_dar_dsisr=1 mfspr r11, SPRN_SRR0 cmplwi cr1, r11, (.Ldtlbie - PAGE_OFFSET)@l cmplwi cr7, r11, (.Litlbie - PAGE_OFFSET)@l cror 4*cr1+eq, 4*cr1+eq, 4*cr7+eq bne cr1, 1f mtcr r10 mfspr r10, SPRN_SPRG_SCRATCH0 mfspr r11, SPRN_SPRG_SCRATCH1 rfi 1: EXCEPTION_PROLOG_1 EXCEPTION_PROLOG_2 INTERRUPT_DATA_BREAKPOINT_8xx DataBreakpoint handle_dar_dsisr=1 mfspr r4,SPRN_BAR stw r4,_DAR(r11) prepare_transfer_to_handler bl do_break REST_NVGPRS(r1) b interrupt_return #ifdef CONFIG_PERF_EVENTS START_EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, InstructionBreakpoint) mtspr SPRN_SPRG_SCRATCH0, r10 lwz r10, (instruction_counter - PAGE_OFFSET)@l(0) addi r10, r10, -1 stw r10, (instruction_counter - PAGE_OFFSET)@l(0) lis r10, 0xffff ori r10, r10, 0x01 mtspr SPRN_COUNTA, r10 mfspr r10, SPRN_SPRG_SCRATCH0 rfi #else EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, Trap_1d, unknown_exception) #endif EXCEPTION(0x1e00, Trap_1e, unknown_exception) EXCEPTION(0x1f00, Trap_1f, unknown_exception) __HEAD . = 0x2000 /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions * by decoding the registers used by the dcbx instruction and adding them. * DAR is set to the calculated address. */ FixupDAR:/* Entry point for dcbx workaround. */ mtspr SPRN_M_TW, r10 /* fetch instruction from memory. */ mfspr r10, SPRN_SRR0 mtspr SPRN_MD_EPN, r10 rlwinm r11, r10, 16, 0xfff8 cmpli cr1, r11, PAGE_OFFSET@h mfspr r11, SPRN_M_TWB /* Get level 1 table */ blt+ cr1, 3f /* create physical page address from effective address */ tophys(r11, r10) mfspr r11, SPRN_M_TWB /* Get level 1 table */ rlwinm r11, r11, 0, 20, 31 oris r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha 3: lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ mtspr SPRN_MD_TWC, r11 mtcrf 0x01, r11 mfspr r11, SPRN_MD_TWC lwz r11, 0(r11) /* Get the pte */ bt 28,200f /* bit 28 = Large page (8M) */ /* concat physical page address(r11) and page offset(r10) */ rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31 201: lwz r11,0(r11) /* Check if it really is a dcbx instruction. */ /* dcbt and dcbtst does not generate DTLB Misses/Errors, * no need to include them here */ xoris r10, r11, 0x7c00 /* check if major OP code is 31 */ rlwinm r10, r10, 0, 21, 5 cmpwi cr1, r10, 2028 /* Is dcbz? */ beq+ cr1, 142f cmpwi cr1, r10, 940 /* Is dcbi? */ beq+ cr1, 142f cmpwi cr1, r10, 108 /* Is dcbst? */ beq+ cr1, 144f /* Fix up store bit! */ cmpwi cr1, r10, 172 /* Is dcbf? */ beq+ cr1, 142f cmpwi cr1, r10, 1964 /* Is icbi? */ beq+ cr1, 142f 141: mfspr r10,SPRN_M_TW b DARFixed /* Nope, go back to normal TLB processing */ 200: /* concat physical page address(r11) and page offset(r10) */ rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31 b 201b 144: mfspr r10, SPRN_DSISR rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */ mtspr SPRN_DSISR, r10 142: /* continue, it was a dcbx, dcbi instruction. 
*/ mfctr r10 mtdar r10 /* save ctr reg in DAR */ rlwinm r10, r11, 24, 24, 28 /* offset into jump table for reg RB */ addi r10, r10, 150f@l /* add start of table */ mtctr r10 /* load ctr with jump address */ xor r10, r10, r10 /* sum starts at zero */ bctr /* jump into table */ 150: add r10, r10, r0 ;b 151f add r10, r10, r1 ;b 151f add r10, r10, r2 ;b 151f add r10, r10, r3 ;b 151f add r10, r10, r4 ;b 151f add r10, r10, r5 ;b 151f add r10, r10, r6 ;b 151f add r10, r10, r7 ;b 151f add r10, r10, r8 ;b 151f add r10, r10, r9 ;b 151f mtctr r11 ;b 154f /* r10 needs special handling */ mtctr r11 ;b 153f /* r11 needs special handling */ add r10, r10, r12 ;b 151f add r10, r10, r13 ;b 151f add r10, r10, r14 ;b 151f add r10, r10, r15 ;b 151f add r10, r10, r16 ;b 151f add r10, r10, r17 ;b 151f add r10, r10, r18 ;b 151f add r10, r10, r19 ;b 151f add r10, r10, r20 ;b 151f add r10, r10, r21 ;b 151f add r10, r10, r22 ;b 151f add r10, r10, r23 ;b 151f add r10, r10, r24 ;b 151f add r10, r10, r25 ;b 151f add r10, r10, r26 ;b 151f add r10, r10, r27 ;b 151f add r10, r10, r28 ;b 151f add r10, r10, r29 ;b 151f add r10, r10, r30 ;b 151f add r10, r10, r31 151: rlwinm r11,r11,19,24,28 /* offset into jump table for reg RA */ cmpwi cr1, r11, 0 beq cr1, 152f /* if reg RA is zero, don't add it */ addi r11, r11, 150b@l /* add start of table */ mtctr r11 /* load ctr with jump address */ rlwinm r11,r11,0,16,10 /* make sure we don't execute this more than once */ bctr /* jump into table */ 152: mfdar r11 mtctr r11 /* restore ctr reg from DAR */ mfspr r11, SPRN_SPRG_THREAD stw r10, DAR(r11) mfspr r10, SPRN_DSISR stw r10, DSISR(r11) mfspr r10,SPRN_M_TW b DARFixed /* Go back to normal TLB handling */ /* special handling for r10,r11 since these are modified already */ 153: mfspr r11, SPRN_SPRG_SCRATCH1 /* load r11 from SPRN_SPRG_SCRATCH1 */ add r10, r10, r11 /* add it */ mfctr r11 /* restore r11 */ b 151b 154: mfspr r11, SPRN_SPRG_SCRATCH0 /* load r10 from SPRN_SPRG_SCRATCH0 */ add r10, r10, r11 /* add it */ mfctr r11 /* restore r11 */ b 151b /* * This is where the main kernel code starts. */ start_here: /* ptr to current */ lis r2,init_task@h ori r2,r2,init_task@l /* ptr to phys current thread */ tophys(r4,r2) addi r4,r4,THREAD /* init task's THREAD */ mtspr SPRN_SPRG_THREAD,r4 /* stack */ lis r1,init_thread_union@ha addi r1,r1,init_thread_union@l lis r0, STACK_END_MAGIC@h ori r0, r0, STACK_END_MAGIC@l stw r0, 0(r1) li r0,0 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) lis r6, swapper_pg_dir@ha tophys(r6,r6) mtspr SPRN_M_TWB, r6 bl early_init /* We have to do this with MMU on */ /* * Decide what sort of machine this is and initialize the MMU. */ #ifdef CONFIG_KASAN bl kasan_early_init #endif li r3,0 mr r4,r31 bl machine_init bl MMU_init /* * Go back to running unmapped so we can load up new values * and change to using our exception vectors. * On the 8xx, all we have to do is invalidate the TLB to clear * the old 8M byte TLB mappings and load the page table base register. */ /* The right way to do this would be to track it down through * init's THREAD like the context switch code does, but this is * easier......until someone changes init's static structures. 
*/ lis r4,2f@h ori r4,r4,2f@l tophys(r4,r4) li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 rfi /* Load up the kernel context */ 2: #ifdef CONFIG_PIN_TLB_IMMR lis r0, MD_TWAM@h oris r0, r0, 0x1f00 mtspr SPRN_MD_CTR, r0 LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID) tlbie r0 mtspr SPRN_MD_EPN, r0 LOAD_REG_IMMEDIATE(r0, MD_SVALID | MD_PS512K | MD_GUARDED) mtspr SPRN_MD_TWC, r0 mfspr r0, SPRN_IMMR rlwinm r0, r0, 0, 0xfff80000 ori r0, r0, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \ _PAGE_NO_CACHE | _PAGE_PRESENT mtspr SPRN_MD_RPN, r0 lis r0, (MD_TWAM | MD_RSV4I)@h mtspr SPRN_MD_CTR, r0 #endif #if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR) lis r0, MD_TWAM@h mtspr SPRN_MD_CTR, r0 #endif tlbia /* Clear all TLB entries */ sync /* wait for tlbia/tlbie to finish */ /* set up the PTE pointers for the Abatron bdiGDB. */ lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l stw r5, 0xf0(0) /* Must match your Abatron config file */ tophys(r5,r5) lis r6, swapper_pg_dir@h ori r6, r6, swapper_pg_dir@l stw r6, 0(r5) /* Now turn on the MMU for real! */ li r4,MSR_KERNEL lis r3,start_kernel@h ori r3,r3,start_kernel@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 rfi /* enable MMU and jump to start_kernel */ /* Set up the initial MMU state so we can do the first level of * kernel initialization. This maps the first 8 MBytes of memory 1:1 * virtual to physical. Also, set the cache mode since that is defined * by TLB entries and perform any additional mapping (like of the IMMR). * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel, * 24 Mbytes of data, and the 512k IMMR space. Anything not covered by * these mappings is mapped by page tables. */ initial_mmu: li r8, 0 mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */ lis r10, MD_TWAM@h mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */ tlbia /* Invalidate all TLB entries */ lis r8, MI_APG_INIT@h /* Set protection modes */ ori r8, r8, MI_APG_INIT@l mtspr SPRN_MI_AP, r8 lis r8, MD_APG_INIT@h ori r8, r8, MD_APG_INIT@l mtspr SPRN_MD_AP, r8 /* Map the lower RAM (up to 32 Mbytes) into the ITLB and DTLB */ lis r8, MI_RSV4I@h ori r8, r8, 0x1c00 oris r12, r10, MD_RSV4I@h ori r12, r12, 0x1c00 li r9, 4 /* up to 4 pages of 8M */ mtctr r9 lis r9, KERNELBASE@h /* Create vaddr for TLB */ li r10, MI_PS8MEG | _PMD_ACCESSED | MI_SVALID li r11, MI_BOOTINIT /* Create RPN for address 0 */ 1: mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */ addi r8, r8, 0x100 ori r0, r9, MI_EVALID /* Mark it valid */ mtspr SPRN_MI_EPN, r0 mtspr SPRN_MI_TWC, r10 mtspr SPRN_MI_RPN, r11 /* Store TLB entry */ mtspr SPRN_MD_CTR, r12 addi r12, r12, 0x100 mtspr SPRN_MD_EPN, r0 mtspr SPRN_MD_TWC, r10 mtspr SPRN_MD_RPN, r11 addis r9, r9, 0x80 addis r11, r11, 0x80 bdnz 1b /* Since the cache is enabled according to the information we * just loaded into the TLB, invalidate and enable the caches here. * We should probably check/set other modes....later. 
*/ lis r8, IDC_INVALL@h mtspr SPRN_IC_CST, r8 mtspr SPRN_DC_CST, r8 lis r8, IDC_ENABLE@h mtspr SPRN_IC_CST, r8 mtspr SPRN_DC_CST, r8 /* Disable debug mode entry on breakpoints */ mfspr r8, SPRN_DER #ifdef CONFIG_PERF_EVENTS rlwinm r8, r8, 0, ~0xc #else rlwinm r8, r8, 0, ~0x8 #endif mtspr SPRN_DER, r8 blr _GLOBAL(mmu_pin_tlb) lis r9, (1f - PAGE_OFFSET)@h ori r9, r9, (1f - PAGE_OFFSET)@l mfmsr r10 mflr r11 li r12, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI) rlwinm r0, r10, 0, ~MSR_RI rlwinm r0, r0, 0, ~MSR_EE mtmsr r0 isync .align 4 mtspr SPRN_SRR0, r9 mtspr SPRN_SRR1, r12 rfi 1: li r5, 0 lis r6, MD_TWAM@h mtspr SPRN_MI_CTR, r5 mtspr SPRN_MD_CTR, r6 tlbia LOAD_REG_IMMEDIATE(r5, 28 << 8) LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED) LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT) LOAD_REG_ADDR(r9, _sinittext) li r0, 4 mtctr r0 2: ori r0, r6, MI_EVALID mtspr SPRN_MI_CTR, r5 mtspr SPRN_MI_EPN, r0 mtspr SPRN_MI_TWC, r7 mtspr SPRN_MI_RPN, r8 addi r5, r5, 0x100 addis r6, r6, SZ_8M@h addis r8, r8, SZ_8M@h cmplw r6, r9 bdnzt lt, 2b lis r0, MI_RSV4I@h mtspr SPRN_MI_CTR, r0 LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM) #ifdef CONFIG_PIN_TLB_DATA LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED) li r8, 0 #ifdef CONFIG_PIN_TLB_IMMR li r0, 3 #else li r0, 4 #endif mtctr r0 cmpwi r4, 0 beq 4f LOAD_REG_ADDR(r9, _sinittext) 2: ori r0, r6, MD_EVALID ori r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT mtspr SPRN_MD_CTR, r5 mtspr SPRN_MD_EPN, r0 mtspr SPRN_MD_TWC, r7 mtspr SPRN_MD_RPN, r12 addi r5, r5, 0x100 addis r6, r6, SZ_8M@h addis r8, r8, SZ_8M@h cmplw r6, r9 bdnzt lt, 2b 4: 2: ori r0, r6, MD_EVALID ori r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT mtspr SPRN_MD_CTR, r5 mtspr SPRN_MD_EPN, r0 mtspr SPRN_MD_TWC, r7 mtspr SPRN_MD_RPN, r12 addi r5, r5, 0x100 addis r6, r6, SZ_8M@h addis r8, r8, SZ_8M@h cmplw r6, r3 bdnzt lt, 2b #endif #ifdef CONFIG_PIN_TLB_IMMR LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID) LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED | _PMD_ACCESSED) mfspr r8, SPRN_IMMR rlwinm r8, r8, 0, 0xfff80000 ori r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \ _PAGE_NO_CACHE | _PAGE_PRESENT mtspr SPRN_MD_CTR, r5 mtspr SPRN_MD_EPN, r0 mtspr SPRN_MD_TWC, r7 mtspr SPRN_MD_RPN, r8 #endif #if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA) lis r0, (MD_RSV4I | MD_TWAM)@h mtspr SPRN_MD_CTR, r0 #endif mtspr SPRN_SRR1, r10 mtspr SPRN_SRR0, r11 rfi
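The FixupDAR jump table above computes the effective address of a buggy dcbX/icbi instruction by adding GPR(RB) and, when the RA field is nonzero, GPR(RA). Decoded in C it would look roughly like the sketch below; the gprs[] array is a stand-in for the saved register file (the real code has to recover r10/r11 from scratch SPRs because they are already clobbered).

/*
 * Illustrative decode of a dcbX/icbi effective address, mirroring
 * the FixupDAR jump table above.
 */
static unsigned long dcbx_effective_address(unsigned int insn,
					    const unsigned long *gprs)
{
	unsigned int ra = (insn >> 16) & 0x1f;	/* RA field */
	unsigned int rb = (insn >> 11) & 0x1f;	/* RB field */
	unsigned long ea = gprs[rb];

	if (ra != 0)		/* RA == 0 means "no base register" */
		ea += gprs[ra];
	return ea;		/* the handler stores this into DAR */
}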
aixcc-public/challenge-001-exemplar-source
9,366
arch/powerpc/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #ifdef CONFIG_PPC64 #define PROVIDE32(x) PROVIDE(__unused__##x) #else #define PROVIDE32(x) PROVIDE(x) #endif #define BSS_FIRST_SECTIONS *(.bss.prominit) #define EMITS_PT_NOTE #define RO_EXCEPTION_TABLE_ALIGN 0 #define RUNTIME_DISCARD_EXIT #define SOFT_MASK_TABLE(align) \ . = ALIGN(align); \ __soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) { \ __start___soft_mask_table = .; \ KEEP(*(__soft_mask_table)) \ __stop___soft_mask_table = .; \ } #define RESTART_TABLE(align) \ . = ALIGN(align); \ __restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) { \ __start___restart_table = .; \ KEEP(*(__restart_table)) \ __stop___restart_table = .; \ } #include <asm/page.h> #include <asm-generic/vmlinux.lds.h> #include <asm/cache.h> #include <asm/thread_info.h> #define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT) #if STRICT_ALIGN_SIZE < PAGE_SIZE #error "CONFIG_DATA_SHIFT must be >= PAGE_SHIFT" #endif ENTRY(_stext) PHDRS { text PT_LOAD FLAGS(7); /* RWX */ note PT_NOTE FLAGS(0); } #ifdef CONFIG_PPC64 OUTPUT_ARCH(powerpc:common64) jiffies = jiffies_64; #else OUTPUT_ARCH(powerpc:common) jiffies = jiffies_64 + 4; #endif SECTIONS { . = KERNELBASE; /* * Text, read only data and other permanent read-only sections */ _text = .; _stext = .; /* * Head text. * This needs to be in its own output section to avoid ld placing * branch trampoline stubs randomly throughout the fixed sections, * which it will do (even if the branch comes from another section) * in order to optimize stub generation. */ .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { #ifdef CONFIG_PPC64 KEEP(*(.head.text.first_256B)); #ifdef CONFIG_PPC_BOOK3E_64 #else KEEP(*(.head.text.real_vectors)); *(.head.text.real_trampolines); KEEP(*(.head.text.virt_vectors)); *(.head.text.virt_trampolines); # if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) KEEP(*(.head.data.fwnmi_page)); # endif #endif #else /* !CONFIG_PPC64 */ HEAD_TEXT #endif } :text __head_end = .; #ifdef CONFIG_PPC64 /* * ALIGN(0) overrides the default output section alignment because * this needs to start right after .head.text in order for fixed * section placement to work. */ .text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) { #ifdef CONFIG_LD_HEAD_STUB_CATCH KEEP(*(.linker_stub_catch)); . = . ; #endif #else .text : AT(ADDR(.text) - LOAD_OFFSET) { ALIGN_FUNCTION(); #endif /* careful! __ftr_alt_* sections need to be close to .text */ *(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text); #ifdef CONFIG_PPC64 *(.tramp.ftrace.text); #endif NOINSTR_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT /* * -Os builds call FP save/restore functions. The powerpc64 * linker generates those on demand in the .sfpr section. * .sfpr gets placed at the beginning of a group of input * sections, which can break start-of-text offset if it is * included with the main text sections, so put it by itself. */ *(.sfpr); MEM_KEEP(init.text) MEM_KEEP(exit.text) } :text . 
= ALIGN(PAGE_SIZE); _etext = .; PROVIDE32 (etext = .); /* Read-only data */ RO_DATA(PAGE_SIZE) #ifdef CONFIG_PPC32 .sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) { *(.sdata2) } #endif .data.rel.ro : AT(ADDR(.data.rel.ro) - LOAD_OFFSET) { *(.data.rel.ro .data.rel.ro.*) } .branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) { *(.branch_lt) } #ifdef CONFIG_PPC32 .got1 : AT(ADDR(.got1) - LOAD_OFFSET) { *(.got1) } .got2 : AT(ADDR(.got2) - LOAD_OFFSET) { __got2_start = .; *(.got2) __got2_end = .; } .got : AT(ADDR(.got) - LOAD_OFFSET) { *(.got) *(.got.plt) } .plt : AT(ADDR(.plt) - LOAD_OFFSET) { /* XXX: is .plt (and .got.plt) required? */ *(.plt) } #else /* CONFIG_PPC32 */ .toc1 : AT(ADDR(.toc1) - LOAD_OFFSET) { *(.toc1) } .got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) { *(.got .toc) } SOFT_MASK_TABLE(8) RESTART_TABLE(8) #ifdef CONFIG_PPC64_ELF_ABI_V1 .opd : AT(ADDR(.opd) - LOAD_OFFSET) { __start_opd = .; KEEP(*(.opd)) __end_opd = .; } #endif . = ALIGN(8); __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) { __start___stf_entry_barrier_fixup = .; *(__stf_entry_barrier_fixup) __stop___stf_entry_barrier_fixup = .; } . = ALIGN(8); __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) { __start___uaccess_flush_fixup = .; *(__uaccess_flush_fixup) __stop___uaccess_flush_fixup = .; } . = ALIGN(8); __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) { __start___entry_flush_fixup = .; *(__entry_flush_fixup) __stop___entry_flush_fixup = .; } . = ALIGN(8); __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) { __start___scv_entry_flush_fixup = .; *(__scv_entry_flush_fixup) __stop___scv_entry_flush_fixup = .; } . = ALIGN(8); __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) { __start___stf_exit_barrier_fixup = .; *(__stf_exit_barrier_fixup) __stop___stf_exit_barrier_fixup = .; } . = ALIGN(8); __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { __start___rfi_flush_fixup = .; *(__rfi_flush_fixup) __stop___rfi_flush_fixup = .; } #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC_BARRIER_NOSPEC . = ALIGN(8); __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) { __start___barrier_nospec_fixup = .; *(__barrier_nospec_fixup) __stop___barrier_nospec_fixup = .; } #endif /* CONFIG_PPC_BARRIER_NOSPEC */ #ifdef CONFIG_PPC_E500 . = ALIGN(8); __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) { __start__btb_flush_fixup = .; *(__btb_flush_fixup) __stop__btb_flush_fixup = .; } #endif /* * Various code relies on __init_begin being at the strict RWX boundary. */ . = ALIGN(STRICT_ALIGN_SIZE); __srwx_boundary = .; __end_rodata = .; __init_begin = .; /* * Init sections discarded at runtime */ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; INIT_TEXT /* *.init.text might be RO so we must ensure this section ends on * a page boundary. */ . = ALIGN(PAGE_SIZE); _einittext = .; #ifdef CONFIG_PPC64 *(.tramp.ftrace.init); #endif } :text /* .exit.text is discarded at runtime, not link time, * to deal with references from __bug_table */ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT } . = ALIGN(PAGE_SIZE); INIT_DATA_SECTION(16) . = ALIGN(8); __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) { __start___ftr_fixup = .; KEEP(*(__ftr_fixup)) __stop___ftr_fixup = .; } . = ALIGN(8); __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) { __start___mmu_ftr_fixup = .; KEEP(*(__mmu_ftr_fixup)) __stop___mmu_ftr_fixup = .; } . 
= ALIGN(8); __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) { __start___lwsync_fixup = .; KEEP(*(__lwsync_fixup)) __stop___lwsync_fixup = .; } #ifdef CONFIG_PPC64 . = ALIGN(8); __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) { __start___fw_ftr_fixup = .; KEEP(*(__fw_ftr_fixup)) __stop___fw_ftr_fixup = .; } #endif PERCPU_SECTION(L1_CACHE_BYTES) . = ALIGN(8); .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { __machine_desc_start = . ; KEEP(*(.machine.desc)) __machine_desc_end = . ; } #ifdef CONFIG_RELOCATABLE . = ALIGN(8); .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { __dynamic_symtab = .; *(.dynsym) } .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) } .dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET) { __dynamic_start = .; *(.dynamic) } .hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) } .gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) } .interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) } .rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET) { __rela_dyn_start = .; *(.rela*) } #endif /* .exit.data is discarded at runtime, not link time, * to deal with references from .exit.text */ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA } /* freed after init ends here */ . = ALIGN(PAGE_SIZE); __init_end = .; /* * And now the various read/write data */ . = ALIGN(PAGE_SIZE); _sdata = .; .data : AT(ADDR(.data) - LOAD_OFFSET) { DATA_DATA *(.data.rel*) #ifdef CONFIG_PPC32 *(SDATA_MAIN) #endif } /* The initial task and kernel stack */ INIT_TASK_DATA_SECTION(THREAD_ALIGN) .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) { PAGE_ALIGNED_DATA(PAGE_SIZE) } .data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) { CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) } .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) { READ_MOSTLY_DATA(L1_CACHE_BYTES) } . = ALIGN(PAGE_SIZE); .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { NOSAVE_DATA } BUG_TABLE . = ALIGN(PAGE_SIZE); _edata = .; PROVIDE32 (edata = .); /* * And finally the bss */ BSS_SECTION(0, 0, 0) . = ALIGN(PAGE_SIZE); _end = . ; PROVIDE32 (end = .); DWARF_DEBUG ELF_DETAILS DISCARDS /DISCARD/ : { *(*.EMB.apuinfo) *(.glink .iplt .plt .comment) *(.gnu.version*) *(.gnu.attributes) *(.eh_frame) #ifndef CONFIG_RELOCATABLE *(.rela*) #endif } }
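Each fixup section above is bracketed by __start___xxx / __stop___xxx symbols precisely so that boot-time C code can walk the entries as an array. A minimal consumer looks like the sketch below; the entry layout and the apply_one_fixup() helper are assumptions for illustration, not the kernel's actual definitions.

/*
 * Sketch of consuming the start/stop symbol pairs emitted by the
 * linker script.  Entry layout and helper are assumed.
 */
struct fixup_entry {
	unsigned long	mask;		/* feature bits this entry needs */
	long		start_off;	/* relative offsets into .text   */
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

extern void apply_one_fixup(struct fixup_entry *f);	/* hypothetical */

extern struct fixup_entry __start___ftr_fixup[];
extern struct fixup_entry __stop___ftr_fixup[];

static void apply_feature_fixups(unsigned long features)
{
	struct fixup_entry *f;

	for (f = __start___ftr_fixup; f < __stop___ftr_fixup; f++)
		if ((f->mask & features) != f->mask)
			apply_one_fixup(f);
}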
aixcc-public/challenge-001-exemplar-source
10,083
arch/powerpc/kernel/misc_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains miscellaneous low-level functions. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) * and Paul Mackerras. * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) */ #include <linux/sys.h> #include <asm/unistd.h> #include <asm/errno.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/cache.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/kexec.h> #include <asm/ptrace.h> #include <asm/mmu.h> #include <asm/export.h> #include <asm/feature-fixups.h> .text _GLOBAL(__bswapdi2) EXPORT_SYMBOL(__bswapdi2) srdi r8,r3,32 rlwinm r7,r3,8,0xffffffff rlwimi r7,r3,24,0,7 rlwinm r9,r8,8,0xffffffff rlwimi r7,r3,24,16,23 rlwimi r9,r8,24,0,7 rlwimi r9,r8,24,16,23 sldi r7,r7,32 or r3,r7,r9 blr #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX _GLOBAL(rmci_on) sync isync li r3,0x100 rldicl r3,r3,32,0 mfspr r5,SPRN_HID4 or r5,r5,r3 sync mtspr SPRN_HID4,r5 isync slbia isync sync blr _GLOBAL(rmci_off) sync isync li r3,0x100 rldicl r3,r3,32,0 mfspr r5,SPRN_HID4 andc r5,r5,r3 sync mtspr SPRN_HID4,r5 isync slbia isync sync blr #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) /* * Do an IO access in real mode */ _GLOBAL(real_readb) mfmsr r7 ori r0,r7,MSR_DR xori r0,r0,MSR_DR sync mtmsrd r0 sync isync mfspr r6,SPRN_HID4 rldicl r5,r6,32,0 ori r5,r5,0x100 rldicl r5,r5,32,0 sync mtspr SPRN_HID4,r5 isync slbia isync lbz r3,0(r3) sync mtspr SPRN_HID4,r6 isync slbia isync mtmsrd r7 sync isync blr /* * Do an IO access in real mode */ _GLOBAL(real_writeb) mfmsr r7 ori r0,r7,MSR_DR xori r0,r0,MSR_DR sync mtmsrd r0 sync isync mfspr r6,SPRN_HID4 rldicl r5,r6,32,0 ori r5,r5,0x100 rldicl r5,r5,32,0 sync mtspr SPRN_HID4,r5 isync slbia isync stb r3,0(r4) sync mtspr SPRN_HID4,r6 isync slbia isync mtmsrd r7 sync isync blr #endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */ #ifdef CONFIG_PPC_PASEMI _GLOBAL(real_205_readb) mfmsr r7 ori r0,r7,MSR_DR xori r0,r0,MSR_DR sync mtmsrd r0 sync isync LBZCIX(R3,R0,R3) isync mtmsrd r7 sync isync blr _GLOBAL(real_205_writeb) mfmsr r7 ori r0,r7,MSR_DR xori r0,r0,MSR_DR sync mtmsrd r0 sync isync STBCIX(R3,R0,R4) isync mtmsrd r7 sync isync blr #endif /* CONFIG_PPC_PASEMI */ #if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE) /* * SCOM access functions for 970 (FX only for now) * * unsigned long scom970_read(unsigned int address); * void scom970_write(unsigned int address, unsigned long value); * * The address passed in is the 24 bits register address. This code * is 970 specific and will not check the status bits, so you should * know what you are doing. */ _GLOBAL(scom970_read) /* interrupts off */ mfmsr r4 ori r0,r4,MSR_EE xori r0,r0,MSR_EE mtmsrd r0,1 /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits * (including parity). On current CPUs they must be 0'd, * and finally or in RW bit */ rlwinm r3,r3,8,0,15 ori r3,r3,0x8000 /* do the actual scom read */ sync mtspr SPRN_SCOMC,r3 isync mfspr r3,SPRN_SCOMD isync mfspr r0,SPRN_SCOMC isync /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah * that's the best we can do). 
Not implemented yet as we don't use * the scom on any of the bogus CPUs yet, but may have to be done * ultimately */ /* restore interrupts */ mtmsrd r4,1 blr _GLOBAL(scom970_write) /* interrupts off */ mfmsr r5 ori r0,r5,MSR_EE xori r0,r0,MSR_EE mtmsrd r0,1 /* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits * (including parity). On current CPUs they must be 0'd. */ rlwinm r3,r3,8,0,15 sync mtspr SPRN_SCOMD,r4 /* write data */ isync mtspr SPRN_SCOMC,r3 /* write command */ isync mfspr r3,SPRN_SCOMC isync /* restore interrupts */ mtmsrd r5,1 blr #endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */ /* kexec_wait(phys_cpu) * * wait for the flag to change, indicating this kernel is going away but * the slave code for the next one is at addresses 0 to 0x100. * * This is used by all slaves, even those that did not find a matching * paca in the secondary startup code. * * Physical (hardware) cpu id should be in r3. */ _GLOBAL(kexec_wait) bcl 20,31,$+4 1: mflr r5 addi r5,r5,kexec_flag-1b 99: HMT_LOW #ifdef CONFIG_KEXEC_CORE /* use no memory without kexec */ lwz r4,0(r5) cmpwi 0,r4,0 beq 99b #ifdef CONFIG_PPC_BOOK3S_64 li r10,0x60 mfmsr r11 clrrdi r11,r11,1 /* Clear MSR_LE */ mtsrr0 r10 mtsrr1 r11 rfid #else /* Create TLB entry in book3e_secondary_core_init */ li r4,0 ba 0x60 #endif #endif /* this can be in text because we won't change it until we are * running in real mode anyway */ kexec_flag: .long 0 #ifdef CONFIG_KEXEC_CORE #ifdef CONFIG_PPC_BOOK3E_64 /* * BOOK3E has no real MMU mode, so we have to set up the initial TLB * for a core to identity map v:0 to p:0. This current implementation * assumes that 1G is enough for kexec. */ kexec_create_tlb: /* * Invalidate all non-IPROT TLB entries to avoid any TLB conflict. * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict. */ PPC_TLBILX_ALL(0,R0) sync isync mfspr r10,SPRN_TLB1CFG andi. r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */ subi r10,r10,1 /* Last entry: no conflict with kernel text */ lis r9,MAS0_TLBSEL(1)@h rlwimi r9,r10,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r9) */ /* Set up a temp identity mapping v:0 to p:0 and return to it. */ mtspr SPRN_MAS0,r9 lis r9,(MAS1_VALID|MAS1_IPROT)@h ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l mtspr SPRN_MAS1,r9 LOAD_REG_IMMEDIATE(r9, 0x0 | MAS2_M_IF_NEEDED) mtspr SPRN_MAS2,r9 LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX) mtspr SPRN_MAS3,r9 li r9,0 mtspr SPRN_MAS7,r9 tlbwe isync blr #endif /* kexec_smp_wait(void) * * call with interrupts off * note: this is a terminal routine, it does not save lr * * get phys id from paca * switch to real mode * mark the paca as no longer used * join other cpus in kexec_wait(phys_id) */ _GLOBAL(kexec_smp_wait) lhz r3,PACAHWCPUID(r13) bl real_mode li r4,KEXEC_STATE_REAL_MODE stb r4,PACAKEXECSTATE(r13) b kexec_wait /* * switch to real mode (turn mmu off) * we use the early kernel trick that the hardware ignores bits * 0 and 1 (big endian) of the effective address in real mode * * don't overwrite r3 here, it is live for kexec_wait above. */ real_mode: /* assume normal blr return */ #ifdef CONFIG_PPC_BOOK3E_64 /* Create an identity mapping. 
*/ b kexec_create_tlb #else 1: li r9,MSR_RI li r10,MSR_DR|MSR_IR mflr r11 /* return address to SRR0 */ mfmsr r12 andc r9,r12,r9 andc r10,r12,r10 mtmsrd r9,1 mtspr SPRN_SRR1,r10 mtspr SPRN_SRR0,r11 rfid #endif /* * kexec_sequence(newstack, start, image, control, clear_all(), copy_with_mmu_off) * * does the grungy work with stack switching and real mode switches * also does simple calls to other code */ _GLOBAL(kexec_sequence) mflr r0 std r0,16(r1) /* switch stacks to newstack -- &kexec_stack.stack */ stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) mr r1,r3 li r0,0 std r0,16(r1) /* save regs for local vars on new stack. * yes, we won't go back, but ... */ std r31,-8(r1) std r30,-16(r1) std r29,-24(r1) std r28,-32(r1) std r27,-40(r1) std r26,-48(r1) std r25,-56(r1) stdu r1,-STACK_FRAME_OVERHEAD-64(r1) /* save args into preserved regs */ mr r31,r3 /* newstack (both) */ mr r30,r4 /* start (real) */ mr r29,r5 /* image (virt) */ mr r28,r6 /* control, unused */ mr r27,r7 /* clear_all() fn desc */ mr r26,r8 /* copy_with_mmu_off */ lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ /* disable interrupts, we are overwriting kernel data next */ #ifdef CONFIG_PPC_BOOK3E_64 wrteei 0 #else mfmsr r3 rlwinm r3,r3,0,17,15 mtmsrd r3,1 #endif /* We need to turn the MMU off unless we are in hash mode * under a hypervisor */ cmpdi r26,0 beq 1f bl real_mode 1: /* copy dest pages, flush whole dest image */ mr r3,r29 bl kexec_copy_flush /* (image) */ /* turn off mmu now if not done earlier */ cmpdi r26,0 bne 1f bl real_mode /* copy 0x100 bytes starting at start to 0 */ 1: li r3,0 mr r4,r30 /* start, aka phys mem offset */ li r5,0x100 li r6,0 bl copy_and_flush /* (dest, src, copy limit, start offset) */ 1: /* assume normal blr return */ /* release other cpus to the new kernel secondary start at 0x60 */ mflr r5 li r6,1 stw r6,kexec_flag-1b(5) cmpdi r27,0 beq 1f /* clear out hardware hash page table and tlb */ #ifdef CONFIG_PPC64_ELF_ABI_V1 ld r12,0(r27) /* deref function descriptor */ #else mr r12,r27 #endif mtctr r12 bctrl /* mmu_hash_ops.hpte_clear_all(void); */ /* * kexec image calling is: * the first 0x100 bytes of the entry point are copied to 0 * * all slaves branch to slave = 0x60 (absolute) * slave(phys_cpu_id); * * master goes to start = entry point * start(phys_cpu_id, start, 0); * * * a wrapper is needed to call existing kernels, here is an approximate * description of one method: * * v2: (2.6.10) * start will be near the boot_block (maybe 0x100 bytes before it?) * it will have a 0x60, which will b to boot_block, where it will wait * and 0 will store phys into struct boot-block and load r3 from there, * copy kernel 0-0x100 and tell slaves to back down to 0x60 again * * v1: (2.6.9) * boot block will have all cpus scanning device tree to see if they * are the boot cpu ????? * other device tree differences (prop sizes, va vs pa, etc)... */ 1: mr r3,r25 # my phys cpu mr r4,r30 # start, aka phys mem offset mtlr 4 li r5,0 blr /* image->start(physid, image->start, 0); */ #endif /* CONFIG_KEXEC_CORE */
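The kexec_wait/kexec_flag pairing above is a one-word mailbox: slaves spin on the flag, which lives in the old kernel's text, and kexec_sequence releases them once the new image's first 0x100 bytes are in place at address 0. Modeled in C below; this is a sketch only, and enter_new_kernel_slave() is a hypothetical stand-in for the "ba 0x60" / rfid path the assembly actually takes.

/*
 * Illustrative C model of the kexec_flag handshake above.
 */
extern void enter_new_kernel_slave(int phys_cpu);	/* hypothetical */

static volatile int kexec_flag_model;	/* lives in old kernel text */

static void kexec_wait_model(int phys_cpu)
{
	while (!kexec_flag_model)
		;			/* HMT_LOW spin in the asm */
	enter_new_kernel_slave(phys_cpu);
}

static void kexec_release_slaves(void)
{
	/* master: first 0x100 bytes of the new image already copied to 0 */
	kexec_flag_model = 1;
}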
aixcc-public/challenge-001-exemplar-source
1,459
arch/powerpc/kernel/cpu_setup_44x.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low level CPU setup functions.
 * Valentine Barshak <vbarshak@ru.mvista.com>
 * MontaVista Software, Inc (c) 2007
 *
 * Based on cpu_setup_6xx code by
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 */

#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>

_GLOBAL(__setup_cpu_440ep)
	b	__init_fpu_44x
_GLOBAL(__setup_cpu_440epx)
	mflr	r4
	bl	__init_fpu_44x
	bl	__plb_disable_wrp
	bl	__fixup_440A_mcheck
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_440grx)
	mflr	r4
	bl	__plb_disable_wrp
	bl	__fixup_440A_mcheck
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_460ex)
_GLOBAL(__setup_cpu_460gt)
_GLOBAL(__setup_cpu_460sx)
_GLOBAL(__setup_cpu_apm821xx)
	mflr	r4
	bl	__init_fpu_44x
	bl	__fixup_440A_mcheck
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_440x5)
_GLOBAL(__setup_cpu_440gx)
_GLOBAL(__setup_cpu_440spe)
	b	__fixup_440A_mcheck

/* enable APU between CPU and FPU */
_GLOBAL(__init_fpu_44x)
	mfspr	r3,SPRN_CCR0
	/* Clear DAPUIB flag in CCR0 */
	rlwinm	r3,r3,0,12,10
	mtspr	SPRN_CCR0,r3
	isync
	blr

/*
 * Workaround for the incorrect write to DDR SDRAM errata.
 * The write address can be corrupted during writes to
 * DDR SDRAM when write pipelining is enabled on PLB0.
 * Disable write pipelining here.
 */
#define DCRN_PLB4A0_ACR	0x81
_GLOBAL(__plb_disable_wrp)
	mfdcr	r3,DCRN_PLB4A0_ACR
	/* clear WRP bit in PLB4A0_ACR */
	rlwinm	r3,r3,0,8,6
	mtdcr	DCRN_PLB4A0_ACR,r3
	isync
	blr
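The rlwinm masks above ("0,12,10" and "0,8,6") are the standard PowerPC idiom for clearing a single IBM-numbered bit: with MB = bit+1 and ME = bit-1 the wrap-around mask keeps every bit except `bit`. The C equivalent is below; the bit positions are inferred from the masks and comments above (DAPUIB = 11, WRP = 7), not taken from the 44x manuals.

/*
 * C equivalent of "rlwinm rD,rS,0,bit+1,bit-1": clear one bit in
 * IBM (big-endian, 0 = MSB) numbering.
 */
static inline unsigned int clear_ibm_bit(unsigned int val, int bit)
{
	return val & ~(0x80000000u >> bit);
}

/* rlwinm r3,r3,0,12,10  ->  clear_ibm_bit(ccr0, 11)   (DAPUIB) */
/* rlwinm r3,r3,0,8,6    ->  clear_ibm_bit(acr,   7)   (WRP)    */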
aixcc-public/challenge-001-exemplar-source
25,491
arch/powerpc/kernel/head_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> * Adapted for Power Macintosh by Paul Mackerras. * Low-level exception handlers and MMU support * rewritten by Paul Mackerras. * Copyright (C) 1996 Paul Mackerras. * * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com * * This file contains the entry point for the 64-bit kernel along * with some early initialization code common to all 64-bit powerpc * variants. */ #include <linux/threads.h> #include <linux/init.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/ppc_asm.h> #include <asm/head-64.h> #include <asm/asm-offsets.h> #include <asm/bug.h> #include <asm/cputable.h> #include <asm/setup.h> #include <asm/hvcall.h> #include <asm/thread_info.h> #include <asm/firmware.h> #include <asm/page_64.h> #include <asm/irqflags.h> #include <asm/kvm_book3s_asm.h> #include <asm/ptrace.h> #include <asm/hw_irq.h> #include <asm/cputhreads.h> #include <asm/ppc-opcode.h> #include <asm/export.h> #include <asm/feature-fixups.h> #ifdef CONFIG_PPC_BOOK3S #include <asm/exception-64s.h> #else #include <asm/exception-64e.h> #endif /* The physical memory is laid out such that the secondary processor * spin code sits at 0x0000...0x00ff. On server, the vectors follow * using the layout described in exceptions-64s.S */ /* * Entering into this code we make the following assumptions: * * For pSeries or server processors: * 1. The MMU is off & open firmware is running in real mode. * 2. The primary CPU enters at __start. * 3. If the RTAS supports "query-cpu-stopped-state", then secondary * CPUs will enter as directed by "start-cpu" RTAS call, which is * generic_secondary_smp_init, with PIR in r3. * 4. Else the secondary CPUs will enter at secondary_hold (0x60) as * directed by the "start-cpu" RTS call, with PIR in r3. * -or- For OPAL entry: * 1. The MMU is off, processor in HV mode. * 2. The primary CPU enters at 0 with device-tree in r3, OPAL base * in r8, and entry in r9 for debugging purposes. * 3. Secondary CPUs enter as directed by OPAL_START_CPU call, which * is at generic_secondary_smp_init, with PIR in r3. * * For Book3E processors: * 1. The MMU is on running in AS0 in a state defined in ePAPR * 2. The kernel is entered at __start */ OPEN_FIXED_SECTION(first_256B, 0x0, 0x100) USE_FIXED_SECTION(first_256B) /* * Offsets are relative from the start of fixed section, and * first_256B starts at 0. Offsets are a bit easier to use here * than the fixed section entry macros. */ . = 0x0 _GLOBAL(__start) /* NOP this out unconditionally */ BEGIN_FTR_SECTION FIXUP_ENDIAN b __start_initialization_multiplatform END_FTR_SECTION(0, 1) /* Catch branch to 0 in real mode */ trap /* Secondary processors spin on this value until it becomes non-zero. * When non-zero, it contains the real address of the function the cpu * should jump to. */ .balign 8 .globl __secondary_hold_spinloop __secondary_hold_spinloop: .8byte 0x0 /* Secondary processors write this value with their cpu # */ /* after they enter the spin loop immediately below. */ .globl __secondary_hold_acknowledge __secondary_hold_acknowledge: .8byte 0x0 #ifdef CONFIG_RELOCATABLE /* This flag is set to 1 by a loader if the kernel should run * at the loaded address instead of the linked address. 
This * is used by kexec-tools to keep the kdump kernel in the * crash_kernel region. The loader is responsible for * observing the alignment requirement. */ #ifdef CONFIG_RELOCATABLE_TEST #define RUN_AT_LOAD_DEFAULT 1 /* Test relocation, do not copy to 0 */ #else #define RUN_AT_LOAD_DEFAULT 0x72756e30 /* "run0" -- relocate to 0 by default */ #endif /* Do not move this variable as kexec-tools knows about it. */ . = 0x5c .globl __run_at_load __run_at_load: DEFINE_FIXED_SYMBOL(__run_at_load, first_256B) .long RUN_AT_LOAD_DEFAULT #endif . = 0x60 /* * The following code is used to hold secondary processors * in a spin loop after they have entered the kernel, but * before the bulk of the kernel has been relocated. This code * is relocated to physical address 0x60 before prom_init is run. * All of it must fit below the first exception vector at 0x100. * Use .globl here not _GLOBAL because we want __secondary_hold * to be the actual text address, not a descriptor. */ .globl __secondary_hold __secondary_hold: FIXUP_ENDIAN #ifndef CONFIG_PPC_BOOK3E_64 mfmsr r24 ori r24,r24,MSR_RI mtmsrd r24 /* RI on */ #endif /* Grab our physical cpu number */ mr r24,r3 /* stash r4 for book3e */ mr r25,r4 /* Tell the master cpu we're here */ /* Relocation is off & we are located at an address less */ /* than 0x100, so only need to grab low order offset. */ std r24,(ABS_ADDR(__secondary_hold_acknowledge, first_256B))(0) sync li r26,0 #ifdef CONFIG_PPC_BOOK3E_64 tovirt(r26,r26) #endif /* All secondary cpus wait here until told to start. */ 100: ld r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(r26) cmpdi 0,r12,0 beq 100b #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE) #ifdef CONFIG_PPC_BOOK3E_64 tovirt(r12,r12) #endif mtctr r12 mr r3,r24 /* * it may be the case that other platforms have r4 right to * begin with, this gives us some safety in case it is not */ #ifdef CONFIG_PPC_BOOK3E_64 mr r4,r25 #else li r4,0 #endif /* Make sure that patched code is visible */ isync bctr #else 0: trap EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 #endif CLOSE_FIXED_SECTION(first_256B) /* * On server, we include the exception vectors code here as it * relies on absolute addressing which is only possible within * this compilation unit */ #ifdef CONFIG_PPC_BOOK3S #include "exceptions-64s.S" #else OPEN_TEXT_SECTION(0x100) #endif USE_TEXT_SECTION() #include "interrupt_64.S" #ifdef CONFIG_PPC_BOOK3E_64 /* * The booting_thread_hwid holds the thread id we want to boot in cpu * hotplug case. It is set by cpu hotplug code, and is invalid by default. * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID] * bit field. */ .globl booting_thread_hwid booting_thread_hwid: .long INVALID_THREAD_HWID .align 3 /* * start a thread in the same core * input parameters: * r3 = the thread physical id * r4 = the entry point where thread starts */ _GLOBAL(book3e_start_thread) LOAD_REG_IMMEDIATE(r5, MSR_KERNEL) cmpwi r3, 0 beq 10f cmpwi r3, 1 beq 11f /* If the thread id is invalid, just exit. */ b 13f 10: MTTMR(TMRN_IMSR0, 5) MTTMR(TMRN_INIA0, 4) b 12f 11: MTTMR(TMRN_IMSR1, 5) MTTMR(TMRN_INIA1, 4) 12: isync li r6, 1 sld r6, r6, r3 mtspr SPRN_TENS, r6 13: blr /* * stop a thread in the same core * input parameter: * r3 = the thread physical id */ _GLOBAL(book3e_stop_thread) cmpwi r3, 0 beq 10f cmpwi r3, 1 beq 10f /* If the thread id is invalid, just exit. 
*/ b 13f 10: li r4, 1 sld r4, r4, r3 mtspr SPRN_TENC, r4 13: blr _GLOBAL(fsl_secondary_thread_init) mfspr r4,SPRN_BUCSR /* Enable branch prediction */ lis r3,BUCSR_INIT@h ori r3,r3,BUCSR_INIT@l mtspr SPRN_BUCSR,r3 isync /* * Fix PIR to match the linear numbering in the device tree. * * On e6500, the reset value of PIR uses the low three bits for * the thread within a core, and the upper bits for the core * number. There are two threads per core, so shift everything * but the low bit right by two bits so that the cpu numbering is * continuous. * * If the old value of BUCSR is non-zero, this thread has run * before. Thus, we assume we are coming from kexec or a similar * scenario, and PIR is already set to the correct value. This * is a bit of a hack, but there are limited opportunities for * getting information into the thread and the alternatives * seemed like they'd be overkill. We can't tell just by looking * at the old PIR value which state it's in, since the same value * could be valid for one thread out of reset and for a different * thread in Linux. */ mfspr r3, SPRN_PIR cmpwi r4,0 bne 1f rlwimi r3, r3, 30, 2, 30 mtspr SPRN_PIR, r3 1: mr r24,r3 /* turn on 64-bit mode */ bl enable_64b_mode /* get a valid TOC pointer, wherever we're mapped at */ bl relative_toc tovirt(r2,r2) /* Book3E initialization */ mr r3,r24 bl book3e_secondary_thread_init b generic_secondary_common_init #endif /* CONFIG_PPC_BOOK3E_64 */ /* * On pSeries and most other platforms, secondary processors spin * in the following code. * At entry, r3 = this processor's number (physical cpu id) * * On Book3E, r4 = 1 to indicate that the initial TLB entry for * this core already exists (setup via some other mechanism such * as SCOM before entry). */ _GLOBAL(generic_secondary_smp_init) FIXUP_ENDIAN mr r24,r3 mr r25,r4 /* turn on 64-bit mode */ bl enable_64b_mode /* get a valid TOC pointer, wherever we're mapped at */ bl relative_toc tovirt(r2,r2) #ifdef CONFIG_PPC_BOOK3E_64 /* Book3E initialization */ mr r3,r24 mr r4,r25 bl book3e_secondary_core_init /* * After common core init has finished, check if the current thread is the * one we wanted to boot. If not, start the specified thread and stop the * current thread. */ LOAD_REG_ADDR(r4, booting_thread_hwid) lwz r3, 0(r4) li r5, INVALID_THREAD_HWID cmpw r3, r5 beq 20f /* * The value of booting_thread_hwid has been stored in r3, * so make it invalid. */ stw r5, 0(r4) /* * Get the current thread id and check if it is the one we wanted. * If not, start the one specified in booting_thread_hwid and stop * the current thread. */ mfspr r8, SPRN_TIR cmpw r3, r8 beq 20f /* start the specified thread */ LOAD_REG_ADDR(r5, fsl_secondary_thread_init) ld r4, 0(r5) bl book3e_start_thread /* stop the current thread */ mr r3, r8 bl book3e_stop_thread 10: b 10b 20: #endif generic_secondary_common_init: /* Set up a paca value for this processor. Since we have the * physical cpu id in r24, we need to search the pacas to find * which logical id maps to our physical one. 
*/ #ifndef CONFIG_SMP b kexec_wait /* wait for next kernel if !SMP */ #else LOAD_REG_ADDR(r8, paca_ptrs) /* Load paca_ptrs pointe */ ld r8,0(r8) /* Get base vaddr of array */ #if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS) LOAD_REG_IMMEDIATE(r7, NR_CPUS) #else LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ lwz r7,0(r7) /* also the max paca allocated */ #endif li r5,0 /* logical cpu id */ 1: sldi r9,r5,3 /* get paca_ptrs[] index from cpu id */ ldx r13,r9,r8 /* r13 = paca_ptrs[cpu id] */ lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ cmpw r6,r24 /* Compare to our id */ beq 2f addi r5,r5,1 cmpw r5,r7 /* Check if more pacas exist */ blt 1b mr r3,r24 /* not found, copy phys to r3 */ b kexec_wait /* next kernel might do better */ 2: SET_PACA(r13) #ifdef CONFIG_PPC_BOOK3E_64 addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ mtspr SPRN_SPRG_TLB_EXFRAME,r12 #endif /* From now on, r24 is expected to be logical cpuid */ mr r24,r5 /* Create a temp kernel stack for use before relocation is on. */ ld r1,PACAEMERGSP(r13) subi r1,r1,STACK_FRAME_OVERHEAD /* See if we need to call a cpu state restore handler */ LOAD_REG_ADDR(r23, cur_cpu_spec) ld r23,0(r23) ld r12,CPU_SPEC_RESTORE(r23) cmpdi 0,r12,0 beq 3f #ifdef CONFIG_PPC64_ELF_ABI_V1 ld r12,0(r12) #endif mtctr r12 bctrl 3: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */ lwarx r4,0,r3 subi r4,r4,1 stwcx. r4,0,r3 bne 3b isync 4: HMT_LOW lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ /* start. */ cmpwi 0,r23,0 beq 4b /* Loop until told to go */ sync /* order paca.run and cur_cpu_spec */ isync /* In case code patching happened */ b __secondary_start #endif /* SMP */ /* * Turn the MMU off. * Assumes we're mapped EA == RA if the MMU is on. */ #ifdef CONFIG_PPC_BOOK3S __mmu_off: mfmsr r3 andi. r0,r3,MSR_IR|MSR_DR beqlr mflr r4 andc r3,r3,r0 mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 sync rfid b . /* prevent speculative execution */ #endif /* * Here is our main kernel entry point. We support currently 2 kind of entries * depending on the value of r5. * * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content * in r3...r7 * * r5 == NULL -> kexec style entry. r3 is a physical pointer to the * DT block, r4 is a physical pointer to the kernel itself * */ __start_initialization_multiplatform: /* Make sure we are running in 64 bits mode */ bl enable_64b_mode /* Zero r13 (paca) so early program check / mce don't use it */ li r13,0 /* Get TOC pointer (current runtime address) */ bl relative_toc /* find out where we are now */ bcl 20,31,$+4 0: mflr r26 /* r26 = runtime addr here */ addis r26,r26,(_stext - 0b)@ha addi r26,r26,(_stext - 0b)@l /* current runtime base addr */ /* * Are we booted from a PROM Of-type client-interface ? 
*/ cmpldi cr0,r5,0 beq 1f b __boot_from_prom /* yes -> prom */ 1: /* Save parameters */ mr r31,r3 mr r30,r4 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL /* Save OPAL entry */ mr r28,r8 mr r29,r9 #endif #ifdef CONFIG_PPC_BOOK3E_64 bl start_initialization_book3e b __after_prom_start #else /* Setup some critical 970 SPRs before switching MMU off */ mfspr r0,SPRN_PVR srwi r0,r0,16 cmpwi r0,0x39 /* 970 */ beq 1f cmpwi r0,0x3c /* 970FX */ beq 1f cmpwi r0,0x44 /* 970MP */ beq 1f cmpwi r0,0x45 /* 970GX */ bne 2f 1: bl __cpu_preinit_ppc970 2: /* Switch off MMU if not already off */ bl __mmu_off b __after_prom_start #endif /* CONFIG_PPC_BOOK3E_64 */ __REF __boot_from_prom: #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE /* Save parameters */ mr r31,r3 mr r30,r4 mr r29,r5 mr r28,r6 mr r27,r7 /* * Align the stack to 16-byte boundary * Depending on the size and layout of the ELF sections in the initial * boot binary, the stack pointer may be unaligned on PowerMac */ rldicr r1,r1,0,59 #ifdef CONFIG_RELOCATABLE /* Relocate code for where we are now */ mr r3,r26 bl relocate #endif /* Restore parameters */ mr r3,r31 mr r4,r30 mr r5,r29 mr r6,r28 mr r7,r27 /* Do all of the interaction with OF client interface */ mr r8,r26 bl prom_init #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ /* We never return. We also hit that trap if trying to boot * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ trap .previous __after_prom_start: #ifdef CONFIG_RELOCATABLE /* process relocations for the final address of the kernel */ lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ sldi r25,r25,32 #if defined(CONFIG_PPC_BOOK3E_64) tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */ #endif lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26) #if defined(CONFIG_PPC_BOOK3E_64) tophys(r26,r26) #endif cmplwi cr0,r7,1 /* flagged to stay where we are ? */ bne 1f add r25,r25,r26 1: mr r3,r25 bl relocate #if defined(CONFIG_PPC_BOOK3E_64) /* IVPR needs to be set after relocation. */ bl init_core_book3e #endif #endif /* * We need to run with _stext at physical address PHYSICAL_START. * This will leave some code in the first 256B of * real memory, which are reserved for software use. * * Note: This process overwrites the OF exception vectors. */ li r3,0 /* target addr */ #ifdef CONFIG_PPC_BOOK3E_64 tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */ #endif mr. r4,r26 /* In some cases the loader may */ #if defined(CONFIG_PPC_BOOK3E_64) tovirt(r4,r4) #endif beq 9f /* have already put us at zero */ li r6,0x100 /* Start offset, the first 0x100 */ /* bytes were copied earlier. */ #ifdef CONFIG_RELOCATABLE /* * Check if the kernel has to be running as relocatable kernel based on the * variable __run_at_load, if it is set the kernel is treated as relocatable * kernel, otherwise it will be moved to PHYSICAL_START */ #if defined(CONFIG_PPC_BOOK3E_64) tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */ #endif lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26) cmplwi cr0,r7,1 bne 3f #ifdef CONFIG_PPC_BOOK3E_64 LOAD_REG_ADDR(r5, __end_interrupts) LOAD_REG_ADDR(r11, _stext) sub r5,r5,r11 #else /* just copy interrupts */ LOAD_REG_IMMEDIATE_SYM(r5, r11, FIXED_SYMBOL_ABS_ADDR(__end_interrupts)) #endif b 5f 3: #endif /* # bytes of memory to copy */ lis r5,(ABS_ADDR(copy_to_here, text))@ha addi r5,r5,(ABS_ADDR(copy_to_here, text))@l bl copy_and_flush /* copy the first n bytes */ /* this includes the code being */ /* executed here. 
*/ /* Jump to the copy of this code that we just made */ addis r8,r3,(ABS_ADDR(4f, text))@ha addi r12,r8,(ABS_ADDR(4f, text))@l mtctr r12 bctr .balign 8 p_end: .8byte _end - copy_to_here 4: /* * Now copy the rest of the kernel up to _end, add * _end - copy_to_here to the copy limit and run again. */ addis r8,r26,(ABS_ADDR(p_end, text))@ha ld r8,(ABS_ADDR(p_end, text))@l(r8) add r5,r5,r8 5: bl copy_and_flush /* copy the rest */ 9: b start_here_multiplatform /* * Copy routine used to copy the kernel to start at physical address 0 * and flush and invalidate the caches as needed. * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. * * Note: this routine *only* clobbers r0, r6 and lr */ _GLOBAL(copy_and_flush) addi r5,r5,-8 addi r6,r6,-8 4: li r0,8 /* Use the smallest common */ /* denominator cache line */ /* size. This results in */ /* extra cache line flushes */ /* but operation is correct. */ /* Can't get cache line size */ /* from NACA as it is being */ /* moved too. */ mtctr r0 /* put # words/line in ctr */ 3: addi r6,r6,8 /* copy a cache line */ ldx r0,r6,r4 stdx r0,r6,r3 bdnz 3b dcbst r6,r3 /* write it to memory */ sync icbi r6,r3 /* flush the icache line */ cmpld 0,r6,r5 blt 4b sync addi r5,r5,8 addi r6,r6,8 isync blr _ASM_NOKPROBE_SYMBOL(copy_and_flush); /* Called in real mode */ .align 8 copy_to_here: #ifdef CONFIG_SMP #ifdef CONFIG_PPC_PMAC /* * On PowerMac, secondary processors starts from the reset vector, which * is temporarily turned into a call to one of the functions below. */ .section ".text"; .align 2 ; .globl __secondary_start_pmac_0 __secondary_start_pmac_0: /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ li r24,0 b 1f li r24,1 b 1f li r24,2 b 1f li r24,3 1: _GLOBAL(pmac_secondary_start) /* turn on 64-bit mode */ bl enable_64b_mode li r0,0 mfspr r3,SPRN_HID4 rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */ sync mtspr SPRN_HID4,r3 isync sync slbia /* get TOC pointer (real address) */ bl relative_toc tovirt(r2,r2) /* Copy some CPU settings from CPU 0 */ bl __restore_cpu_ppc970 /* pSeries do that early though I don't think we really need it */ mfmsr r3 ori r3,r3,MSR_RI mtmsrd r3 /* RI on */ /* Set up a paca value for this processor. */ LOAD_REG_ADDR(r4,paca_ptrs) /* Load paca pointer */ ld r4,0(r4) /* Get base vaddr of paca_ptrs array */ sldi r5,r24,3 /* get paca_ptrs[] index from cpu id */ ldx r13,r5,r4 /* r13 = paca_ptrs[cpu id] */ SET_PACA(r13) /* Save vaddr of paca in an SPRG*/ /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) */ li r0,IRQS_DISABLED stb r0,PACAIRQSOFTMASK(r13) li r0,PACA_IRQ_HARD_DIS stb r0,PACAIRQHAPPENED(r13) /* Create a temp kernel stack for use before relocation is on. */ ld r1,PACAEMERGSP(r13) subi r1,r1,STACK_FRAME_OVERHEAD b __secondary_start #endif /* CONFIG_PPC_PMAC */ /* * This function is called after the master CPU has released the * secondary processors. The execution environment is relocation off. * The paca for this processor has the following fields initialized at * this point: * 1. Processor number * 2. 
Segment table pointer (virtual address) * On entry the following are set: * r1 = stack pointer (real addr of temp stack) * r24 = cpu# (in Linux terms) * r13 = paca virtual address * SPRG_PACA = paca virtual address */ .section ".text"; .align 2 ; .globl __secondary_start __secondary_start: /* Set thread priority to MEDIUM */ HMT_MEDIUM /* * Do early setup for this CPU, in particular initialising the MMU so we * can turn it on below. This is a call to C, which is OK, we're still * running on the emergency stack. */ bl early_setup_secondary /* * The primary has initialized our kernel stack for us in the paca, grab * it and put it in r1. We must *not* use it until we turn on the MMU * below, because it may not be inside the RMO. */ ld r1, PACAKSAVE(r13) /* Clear backchain so we get nice backtraces */ li r7,0 mtlr r7 /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) */ li r7,IRQS_DISABLED stb r7,PACAIRQSOFTMASK(r13) li r0,PACA_IRQ_HARD_DIS stb r0,PACAIRQHAPPENED(r13) /* enable MMU and jump to start_secondary */ LOAD_REG_ADDR(r3, start_secondary_prolog) LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 RFI_TO_KERNEL b . /* prevent speculative execution */ /* * Running with relocation on at this point. All we want to do is * zero the stack back-chain pointer and get the TOC virtual address * before going into C code. */ start_secondary_prolog: LOAD_PACA_TOC() li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ bl start_secondary b . /* * Reset stack pointer and call start_secondary * to continue with online operation when woken up * from cede in cpu offline. */ _GLOBAL(start_secondary_resume) ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ bl start_secondary b . #endif /* * This subroutine clobbers r11 and r12 */ enable_64b_mode: mfmsr r11 /* grab the current MSR */ #ifdef CONFIG_PPC_BOOK3E_64 oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ mtmsr r11 #else /* CONFIG_PPC_BOOK3E_64 */ LOAD_REG_IMMEDIATE(r12, MSR_64BIT) or r11,r11,r12 mtmsrd r11 isync #endif blr /* * This puts the TOC pointer into r2, offset by 0x8000 (as expected * by the toolchain). It computes the correct value for wherever we * are running at the moment, using position-independent code. * * Note: The compiler constructs pointers using offsets from the * TOC in -mcmodel=medium mode. After we relocate to 0 but before * the MMU is on we need our TOC to be a virtual address otherwise * these pointers will be real addresses which may get stored and * accessed later with the MMU on. We use tovirt() at the call * sites to handle this. */ _GLOBAL(relative_toc) mflr r0 bcl 20,31,$+4 0: mflr r11 ld r2,(p_toc - 0b)(r11) add r2,r2,r11 mtlr r0 blr .balign 8 p_toc: .8byte .TOC. - 0b /* * This is where the main kernel code starts. */ __REF start_here_multiplatform: /* set up the TOC */ bl relative_toc tovirt(r2,r2) /* Clear out the BSS. It may have been done in prom_init, * already but that's irrelevant since prom_init will soon * be detached from the kernel completely. Besides, we need * to clear it now for kexec-style entry. */ LOAD_REG_ADDR(r11,__bss_stop) LOAD_REG_ADDR(r8,__bss_start) sub r11,r11,r8 /* bss size */ addi r11,r11,7 /* round up to an even double word */ srdi. 
r11,r11,3 /* shift right by 3 */ beq 4f addi r8,r8,-8 li r0,0 mtctr r11 /* zero this many doublewords */ 3: stdu r0,8(r8) bdnz 3b 4: #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL /* Setup OPAL entry */ LOAD_REG_ADDR(r11, opal) std r28,0(r11); std r29,8(r11); #endif #ifndef CONFIG_PPC_BOOK3E_64 mfmsr r6 ori r6,r6,MSR_RI mtmsrd r6 /* RI on */ #endif #ifdef CONFIG_RELOCATABLE /* Save the physical address we're running at in kernstart_addr */ LOAD_REG_ADDR(r4, kernstart_addr) clrldi r0,r25,2 std r0,0(r4) #endif /* set up a stack pointer */ LOAD_REG_ADDR(r3,init_thread_union) LOAD_REG_IMMEDIATE(r1,THREAD_SIZE) add r1,r3,r1 li r0,0 stdu r0,-STACK_FRAME_OVERHEAD(r1) /* * Do very early kernel initializations, including initial hash table * and SLB setup before we turn on relocation. */ #ifdef CONFIG_KASAN bl kasan_early_init #endif /* Restore parameters passed from prom_init/kexec */ mr r3,r31 LOAD_REG_ADDR(r12, DOTSYM(early_setup)) mtctr r12 bctrl /* also sets r13 and SPRG_PACA */ LOAD_REG_ADDR(r3, start_here_common) ld r4,PACAKMSR(r13) mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 RFI_TO_KERNEL b . /* prevent speculative execution */ /* This is where all platforms converge execution */ start_here_common: /* relocation is on at this point */ std r1,PACAKSAVE(r13) /* Load the TOC (virtual address) */ LOAD_PACA_TOC() /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) */ li r0,IRQS_DISABLED stb r0,PACAIRQSOFTMASK(r13) li r0,PACA_IRQ_HARD_DIS stb r0,PACAIRQHAPPENED(r13) /* Generic kernel entry */ bl start_kernel /* Not reached */ 0: trap EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 .previous
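The copy_and_flush routine above packs its contract into register comments: r3 = dest, r4 = source, r5 = copy limit, r6 = start offset, copying one 64-byte line at a time and flushing each destination line out of the d-cache and i-cache so the relocated kernel can be executed. A minimal C model of that loop, with the cache-maintenance instructions stubbed out (illustrative only, not kernel code):

#include <stdint.h>
#include <stddef.h>

/* Stand-in for the dcbst; sync; icbi sequence on one cache line. */
static void dcbst_sync_icbi(const void *line) { (void)line; }

/*
 * Model of copy_and_flush: copy [offset, limit) from src to dest in
 * 64-byte lines (the "smallest common denominator cache line size"),
 * pushing each destination line to memory and invalidating the
 * matching i-cache line. Returns the updated offset (>= limit).
 */
size_t copy_and_flush_model(char *dest, const char *src,
                            size_t limit, size_t offset)
{
    while (offset < limit) {
        for (int i = 0; i < 8; i++, offset += 8)   /* 8 dwords = 64B line */
            *(uint64_t *)(dest + offset) =
                *(const uint64_t *)(src + offset);
        dcbst_sync_icbi(dest + offset - 64);       /* write line, drop stale
                                                      i-cache copy */
    }
    return offset;
}

The trailing sync/isync in the real routine then order those flushes against the branch into the freshly copied code.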
aixcc-public/challenge-001-exemplar-source
2,124
arch/powerpc/kernel/idle_64e.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2010 IBM Corp, Benjamin Herrenschmidt <benh@kernel.crashing.org> * * Generic idle routine for 64 bits e500 processors */ #include <linux/threads.h> #include <asm/reg.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ppc-opcode.h> #include <asm/processor.h> #include <asm/thread_info.h> #include <asm/epapr_hcalls.h> #include <asm/hw_irq.h> /* 64-bit version only for now */ .macro BOOK3E_IDLE name loop _GLOBAL(\name) /* Save LR for later */ mflr r0 std r0,16(r1) /* Hard disable interrupts */ wrteei 0 /* Now check if an interrupt came in while we were soft disabled * since we may otherwise lose it (doorbells etc...). */ lbz r3,PACAIRQHAPPENED(r13) cmpwi cr0,r3,0 bne 2f /* Now we are going to mark ourselves as soft and hard enabled in * order to be able to take interrupts while asleep. We inform lockdep * of that. We don't actually turn interrupts on just yet tho. */ #ifdef CONFIG_TRACE_IRQFLAGS stdu r1,-128(r1) bl trace_hardirqs_on addi r1,r1,128 #endif li r0,IRQS_ENABLED stb r0,PACAIRQSOFTMASK(r13) /* Interrupts will make use return to LR, so get something we want * in there */ bl 1f /* And return (interrupts are on) */ ld r0,16(r1) mtlr r0 blr 1: /* Let's set the _TLF_NAPPING flag so interrupts make us return * to the right spot */ ld r11, PACACURRENT(r13) ld r10,TI_LOCAL_FLAGS(r11) ori r10,r10,_TLF_NAPPING std r10,TI_LOCAL_FLAGS(r11) /* We can now re-enable hard interrupts and go to sleep */ wrteei 1 \loop 2: lbz r10,PACAIRQHAPPENED(r13) ori r10,r10,PACA_IRQ_HARD_DIS stb r10,PACAIRQHAPPENED(r13) blr .endm .macro BOOK3E_IDLE_LOOP 1: PPC_WAIT_v203 b 1b .endm /* epapr_ev_idle_start below is patched with the proper hcall opcodes during kernel initialization */ .macro EPAPR_EV_IDLE_LOOP idle_loop: LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE)) .global epapr_ev_idle_start epapr_ev_idle_start: li r3, -1 nop nop nop b idle_loop .endm BOOK3E_IDLE epapr_ev_idle EPAPR_EV_IDLE_LOOP BOOK3E_IDLE e500_idle BOOK3E_IDLE_LOOP
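The BOOK3E_IDLE macro's entry protocol is easier to follow in C. A hedged sketch of the control flow only (field and flag names mirror the source's PACAIRQHAPPENED/PACAIRQSOFTMASK/_TLF_NAPPING, but the types, helper functions, and the two constant values marked below are stand-ins, not kernel definitions; lockdep and the LR plumbing are omitted):

struct paca_model {
    unsigned char irq_happened;     /* PACAIRQHAPPENED */
    unsigned char irq_soft_mask;    /* PACAIRQSOFTMASK */
};

static struct paca_model paca;              /* r13 in the assembly */
static unsigned long thread_local_flags;    /* TI_LOCAL_FLAGS(current) */

#define PACA_IRQ_HARD_DIS 0x01              /* assumed value, model only */
#define IRQS_ENABLED      0
#define TLF_NAPPING_BIT   0x1               /* assumed value, model only */

static void wrteei(int enable) { (void)enable; }  /* hard EE on/off */
static void wait_for_interrupt(void) { }          /* PPC_WAIT / EV_IDLE hcall */

void book3e_idle_model(void)
{
    wrteei(0);                              /* hard disable first */
    if (paca.irq_happened) {
        /* An interrupt arrived while we were soft-disabled: do not
         * sleep; record that hard interrupts stay disabled and bail. */
        paca.irq_happened |= PACA_IRQ_HARD_DIS;
        return;
    }
    paca.irq_soft_mask = IRQS_ENABLED;      /* let the wakeup be taken */
    thread_local_flags |= TLF_NAPPING_BIT;  /* interrupt exit unwinds us */
    wrteei(1);
    for (;;)
        wait_for_interrupt();               /* interrupt returns via LR */
}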
aixcc-public/challenge-001-exemplar-source
5,349
arch/powerpc/kernel/reloc_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Code to process dynamic relocations for PPC32. * * Copyrights (C) IBM Corporation, 2011. * Author: Suzuki Poulose <suzuki@in.ibm.com> * * - Based on ppc64 code - reloc_64.S */ #include <asm/ppc_asm.h> /* Dynamic section table entry tags */ DT_RELA = 7 /* Tag for Elf32_Rela section */ DT_RELASZ = 8 /* Size of the Rela relocs */ DT_RELAENT = 9 /* Size of one Rela reloc entry */ STN_UNDEF = 0 /* Undefined symbol index */ STB_LOCAL = 0 /* Local binding for the symbol */ R_PPC_ADDR16_LO = 4 /* Lower half of (S+A) */ R_PPC_ADDR16_HI = 5 /* Upper half of (S+A) */ R_PPC_ADDR16_HA = 6 /* High Adjusted (S+A) */ R_PPC_RELATIVE = 22 /* * r3 = desired final address */ _GLOBAL(relocate) mflr r0 /* Save our LR */ bcl 20,31,$+4 /* Find our current runtime address */ 0: mflr r12 /* Make it accessible */ mtlr r0 lwz r11, (p_dyn - 0b)(r12) add r11, r11, r12 /* runtime address of .dynamic section */ lwz r9, (p_rela - 0b)(r12) add r9, r9, r12 /* runtime address of .rela.dyn section */ lwz r10, (p_st - 0b)(r12) add r10, r10, r12 /* runtime address of _stext section */ lwz r13, (p_sym - 0b)(r12) add r13, r13, r12 /* runtime address of .dynsym section */ /* * Scan the dynamic section for RELA, RELASZ entries */ li r6, 0 li r7, 0 li r8, 0 1: lwz r5, 0(r11) /* ELF_Dyn.d_tag */ cmpwi r5, 0 /* End of ELF_Dyn[] */ beq eodyn cmpwi r5, DT_RELA bne relasz lwz r7, 4(r11) /* r7 = rela.link */ b skip relasz: cmpwi r5, DT_RELASZ bne relaent lwz r8, 4(r11) /* r8 = Total Rela relocs size */ b skip relaent: cmpwi r5, DT_RELAENT bne skip lwz r6, 4(r11) /* r6 = Size of one Rela reloc */ skip: addi r11, r11, 8 b 1b eodyn: /* End of Dyn Table scan */ /* Check if we have found all the entries */ cmpwi r7, 0 beq done cmpwi r8, 0 beq done cmpwi r6, 0 beq done /* * Work out the current offset from the link time address of .rela * section. * cur_offset[r7] = rela.run[r9] - rela.link [r7] * _stext.link[r12] = _stext.run[r10] - cur_offset[r7] * final_offset[r3] = _stext.final[r3] - _stext.link[r12] */ subf r7, r7, r9 /* cur_offset */ subf r12, r7, r10 subf r3, r12, r3 /* final_offset */ subf r8, r6, r8 /* relaz -= relaent */ /* * Scan through the .rela table and process each entry * r9 - points to the current .rela table entry * r13 - points to the symbol table */ /* * Check if we have a relocation based on symbol * r5 will hold the value of the symbol. */ applyrela: lwz r4, 4(r9) /* r4 = rela.r_info */ srwi r5, r4, 8 /* ELF32_R_SYM(r_info) */ cmpwi r5, STN_UNDEF /* sym == STN_UNDEF ? */ beq get_type /* value = 0 */ /* Find the value of the symbol at index(r5) */ slwi r5, r5, 4 /* r5 = r5 * sizeof(Elf32_Sym) */ add r12, r13, r5 /* r12 = &__dyn_sym[Index] */ /* * GNU ld has a bug, where dynamic relocs based on * STB_LOCAL symbols, the value should be assumed * to be zero. - Alan Modra */ /* XXX: Do we need to check if we are using GNU ld ? 
*/ lbz r5, 12(r12) /* r5 = dyn_sym[Index].st_info */ extrwi r5, r5, 4, 24 /* r5 = ELF32_ST_BIND(r5) */ cmpwi r5, STB_LOCAL /* st_value = 0, ld bug */ beq get_type /* We have r5 = 0 */ lwz r5, 4(r12) /* r5 = __dyn_sym[Index].st_value */ get_type: /* Load the relocation type to r4 */ extrwi r4, r4, 8, 24 /* r4 = ELF32_R_TYPE(r_info) = ((char*)r4)[3] */ /* R_PPC_RELATIVE */ cmpwi r4, R_PPC_RELATIVE bne hi16 lwz r4, 0(r9) /* r_offset */ lwz r0, 8(r9) /* r_addend */ add r0, r0, r3 /* final addend */ stwx r0, r4, r7 /* memory[r4+r7]) = (u32)r0 */ b nxtrela /* continue */ /* R_PPC_ADDR16_HI */ hi16: cmpwi r4, R_PPC_ADDR16_HI bne ha16 lwz r4, 0(r9) /* r_offset */ lwz r0, 8(r9) /* r_addend */ add r0, r0, r3 add r0, r0, r5 /* r0 = (S+A+Offset) */ extrwi r0, r0, 16, 0 /* r0 = (r0 >> 16) */ b store_half /* R_PPC_ADDR16_HA */ ha16: cmpwi r4, R_PPC_ADDR16_HA bne lo16 lwz r4, 0(r9) /* r_offset */ lwz r0, 8(r9) /* r_addend */ add r0, r0, r3 add r0, r0, r5 /* r0 = (S+A+Offset) */ extrwi r5, r0, 1, 16 /* Extract bit 16 */ extrwi r0, r0, 16, 0 /* r0 = (r0 >> 16) */ add r0, r0, r5 /* Add it to r0 */ b store_half /* R_PPC_ADDR16_LO */ lo16: cmpwi r4, R_PPC_ADDR16_LO bne unknown_type lwz r4, 0(r9) /* r_offset */ lwz r0, 8(r9) /* r_addend */ add r0, r0, r3 add r0, r0, r5 /* r0 = (S+A+Offset) */ extrwi r0, r0, 16, 16 /* r0 &= 0xffff */ /* Fall through to */ /* Store half word */ store_half: sthx r0, r4, r7 /* memory[r4+r7] = (u16)r0 */ nxtrela: /* * We have to flush the modified instructions to the * main storage from the d-cache. And also, invalidate the * cached instructions in i-cache which has been modified. * * We delay the sync / isync operation till the end, since * we won't be executing the modified instructions until * we return from here. */ dcbst r4,r7 sync /* Ensure the data is flushed before icbi */ icbi r4,r7 unknown_type: cmpwi r8, 0 /* relasz = 0 ? */ ble done add r9, r9, r6 /* move to next entry in the .rela table */ subf r8, r6, r8 /* relasz -= relaent */ b applyrela done: sync /* Wait for the flush to finish */ isync /* Discard prefetched instructions */ blr p_dyn: .long __dynamic_start - 0b p_rela: .long __rela_dyn_start - 0b p_sym: .long __dynamic_symtab - 0b p_st: .long _stext - 0b
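The half-word relocation arithmetic in the hi16/ha16/lo16 branches above is worth spelling out. A small C rendering of the three cases, where v stands for the S + A + offset value the assembly accumulates in r0 (function names here are descriptive, not from the source):

#include <stdint.h>

static uint16_t addr16_lo(uint32_t v) { return (uint16_t)(v & 0xffff); }
static uint16_t addr16_hi(uint32_t v) { return (uint16_t)(v >> 16); }

/* 'extrwi r5,r0,1,16' extracts the 0x8000 bit of the low half; adding
 * it to the high half pre-compensates for the sign extension a later
 * 'addi rX,rX,sym@l' applies to the low half. */
static uint16_t addr16_ha(uint32_t v)
{
    return (uint16_t)((v >> 16) + ((v >> 15) & 1));
}

This is why a lis r3,sym@ha; addi r3,r3,sym@l pair reconstructs the full 32-bit address even when the low half is 0x8000 or above, which is exactly the case the ha16 branch patches for.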
aixcc-public/challenge-001-exemplar-source
32,518
arch/powerpc/kernel/head_book3s_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> * Adapted for Power Macintosh by Paul Mackerras. * Low-level exception handlers and MMU support * rewritten by Paul Mackerras. * Copyright (C) 1996 Paul Mackerras. * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). * * This file contains the low-level support and setup for the * PowerPC platform, including trap and interrupt dispatch. * (The PPC 8xx embedded CPUs use head_8xx.S instead.) */ #include <linux/init.h> #include <linux/pgtable.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/cputable.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/bug.h> #include <asm/kvm_book3s_asm.h> #include <asm/export.h> #include <asm/feature-fixups.h> #include <asm/interrupt.h> #include "head_32.h" #define LOAD_BAT(n, reg, RA, RB) \ /* see the comment for clear_bats() -- Cort */ \ li RA,0; \ mtspr SPRN_IBAT##n##U,RA; \ mtspr SPRN_DBAT##n##U,RA; \ lwz RA,(n*16)+0(reg); \ lwz RB,(n*16)+4(reg); \ mtspr SPRN_IBAT##n##U,RA; \ mtspr SPRN_IBAT##n##L,RB; \ lwz RA,(n*16)+8(reg); \ lwz RB,(n*16)+12(reg); \ mtspr SPRN_DBAT##n##U,RA; \ mtspr SPRN_DBAT##n##L,RB __HEAD _GLOBAL(_stext); /* * _start is defined this way because the XCOFF loader in the OpenFirmware * on the powermac expects the entry point to be a procedure descriptor. */ _GLOBAL(_start); /* * These are here for legacy reasons, the kernel used to * need to look like a coff function entry for the pmac * but we're always started by some kind of bootloader now. * -- Cort */ nop /* used by __secondary_hold on prep (mtx) and chrp smp */ nop /* used by __secondary_hold on prep (mtx) and chrp smp */ nop /* PMAC * Enter here with the kernel text, data and bss loaded starting at * 0, running with virtual == physical mapping. * r5 points to the prom entry point (the client interface handler * address). Address translation is turned on, with the prom * managing the hash table. Interrupts are disabled. The stack * pointer (r1) points to just below the end of the half-meg region * from 0x380000 - 0x400000, which is mapped in already. * * If we are booted from MacOS via BootX, we enter with the kernel * image loaded somewhere, and the following values in registers: * r3: 'BooX' (0x426f6f58) * r4: virtual address of boot_infos_t * r5: 0 * * PREP * This is jumped to on prep systems right after the kernel is relocated * to its proper place in memory by the boot loader. The expected layout * of the regs is: * r3: ptr to residual data * r4: initrd_start or if no initrd then 0 * r5: initrd_end - unused if r4 is 0 * r6: Start of command line string * r7: End of command line string * * This just gets a minimal mmu environment setup so we can call * start_here() to do the real work. * -- Cort */ .globl __start __start: /* * We have to do any OF calls before we map ourselves to KERNELBASE, * because OF may have I/O devices mapped into that area * (particularly on CHRP). */ cmpwi 0,r5,0 beq 1f #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE /* find out where we are now */ bcl 20,31,$+4 0: mflr r8 /* r8 = runtime addr here */ addis r8,r8,(_stext - 0b)@ha addi r8,r8,(_stext - 0b)@l /* current runtime base addr */ bl prom_init #endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */ /* We never return. 
We also hit that trap if trying to boot * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ trap /* * Check for BootX signature when supporting PowerMac and branch to * appropriate trampoline if it's present */ #ifdef CONFIG_PPC_PMAC 1: lis r31,0x426f ori r31,r31,0x6f58 cmpw 0,r3,r31 bne 1f bl bootx_init trap #endif /* CONFIG_PPC_PMAC */ 1: mr r31,r3 /* save device tree ptr */ li r24,0 /* cpu # */ /* * early_init() does the early machine identification and does * the necessary low-level setup and clears the BSS * -- Cort <cort@fsmlabs.com> */ bl early_init /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains * the physical address we are running at, returned by early_init() */ bl mmu_off __after_mmu_off: bl clear_bats bl flush_tlbs bl initial_bats bl load_segment_registers bl reloc_offset bl early_hash_table #if defined(CONFIG_BOOTX_TEXT) bl setup_disp_bat #endif #ifdef CONFIG_PPC_EARLY_DEBUG_CPM bl setup_cpm_bat #endif #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO bl setup_usbgecko_bat #endif /* * Call setup_cpu for CPU 0 and initialize 6xx Idle */ bl reloc_offset li r24,0 /* cpu# */ bl call_setup_cpu /* Call setup_cpu for this CPU */ bl reloc_offset bl init_idle_6xx /* * We need to run with _start at physical address 0. * On CHRP, we are loaded at 0x10000 since OF on CHRP uses * the exception vectors at 0 (and therefore this copy * overwrites OF's exception vectors with our own). * The MMU is off at this point. */ bl reloc_offset mr r26,r3 addis r4,r3,KERNELBASE@h /* current address of _start */ lis r5,PHYSICAL_START@h cmplw 0,r4,r5 /* already running at PHYSICAL_START? */ bne relocate_kernel /* * we now have the 1st 16M of ram mapped with the bats. * prep needs the mmu to be turned on here, but pmac already has it on. * this shouldn't bother the pmac since it just gets turned on again * as we jump to our code at KERNELBASE. -- Cort * Actually no, pmac doesn't have it on any more. BootX enters with MMU * off, and in other cases, we now turn it off before changing BATs above. */ turn_on_mmu: mfmsr r0 ori r0,r0,MSR_DR|MSR_IR|MSR_RI mtspr SPRN_SRR1,r0 lis r0,start_here@h ori r0,r0,start_here@l mtspr SPRN_SRR0,r0 rfi /* enables MMU */ /* * We need __secondary_hold as a place to hold the other cpus on * an SMP machine, even when we are running a UP kernel. */ . = 0xc0 /* for prep bootloader */ li r3,1 /* MTX only has 1 cpu */ .globl __secondary_hold __secondary_hold: /* tell the master we're here */ stw r3,__secondary_hold_acknowledge@l(0) #ifdef CONFIG_SMP 100: lwz r4,0(0) /* wait until we're told to start */ cmpw 0,r4,r3 bne 100b /* our cpu # was at addr 0 - go */ mr r24,r3 /* cpu # */ b __secondary_start #else b . #endif /* CONFIG_SMP */ .globl __secondary_hold_spinloop __secondary_hold_spinloop: .long 0 .globl __secondary_hold_acknowledge __secondary_hold_acknowledge: .long -1 /* System reset */ /* core99 pmac starts the seconary here by changing the vector, and putting it back to what it was (unknown_async_exception) when done. */ EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception) /* Machine check */ /* * On CHRP, this is complicated by the fact that we could get a * machine check inside RTAS, and we have no guarantee that certain * critical registers will have the values we expect. The set of * registers that might have bad values includes all the GPRs * and all the BATs. We indicate that we are in RTAS by putting * a non-zero value, the address of the exception frame to use, * in thread.rtas_sp. 
The machine check handler checks thread.rtas_sp * and uses its value if it is non-zero. * (Other exception handlers assume that r1 is a valid kernel stack * pointer when we take an exception from supervisor mode.) * -- paulus. */ START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck) EXCEPTION_PROLOG_0 #ifdef CONFIG_PPC_CHRP mtspr SPRN_SPRG_SCRATCH2,r1 mfspr r1, SPRN_SPRG_THREAD lwz r1, RTAS_SP(r1) cmpwi cr1, r1, 0 bne cr1, 7f mfspr r1, SPRN_SPRG_SCRATCH2 #endif /* CONFIG_PPC_CHRP */ EXCEPTION_PROLOG_1 7: EXCEPTION_PROLOG_2 0x200 MachineCheck #ifdef CONFIG_PPC_CHRP beq cr1, 1f twi 31, 0, 0 #endif 1: prepare_transfer_to_handler bl machine_check_exception b interrupt_return /* Data access exception. */ START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess) #ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION mtspr SPRN_SPRG_SCRATCH2,r10 mfspr r10, SPRN_SPRG_THREAD stw r11, THR11(r10) mfspr r10, SPRN_DSISR mfcr r11 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h mfspr r10, SPRN_SPRG_THREAD beq hash_page_dsi .Lhash_page_dsi_cont: mtcr r11 lwz r11, THR11(r10) mfspr r10, SPRN_SPRG_SCRATCH2 MMU_FTR_SECTION_ELSE b 1f ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) #endif 1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1 EXCEPTION_PROLOG_1 EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1 prepare_transfer_to_handler lwz r5, _DSISR(r1) andis. r0, r5, DSISR_DABRMATCH@h bne- 1f bl do_page_fault b interrupt_return 1: bl do_break REST_NVGPRS(r1) b interrupt_return /* Instruction access exception. */ START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess) mtspr SPRN_SPRG_SCRATCH0,r10 mtspr SPRN_SPRG_SCRATCH1,r11 mfspr r10, SPRN_SPRG_THREAD mfspr r11, SPRN_SRR0 stw r11, SRR0(r10) mfspr r11, SPRN_SRR1 /* check whether user or kernel */ stw r11, SRR1(r10) mfcr r10 #ifdef CONFIG_PPC_BOOK3S_604 BEGIN_MMU_FTR_SECTION andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */ bne hash_page_isi .Lhash_page_isi_cont: mfspr r11, SPRN_SRR1 /* check whether user or kernel */ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) #endif andi. r11, r11, MSR_PR EXCEPTION_PROLOG_1 EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ stw r5, _DSISR(r11) stw r12, _DAR(r11) prepare_transfer_to_handler bl do_page_fault b interrupt_return /* External interrupt */ EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ) /* Alignment exception */ START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment) EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1 prepare_transfer_to_handler bl alignment_exception REST_NVGPRS(r1) b interrupt_return /* Program check exception */ START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck) EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck prepare_transfer_to_handler bl program_check_exception REST_NVGPRS(r1) b interrupt_return /* Floating-point unavailable */ START_EXCEPTION(0x800, FPUnavailable) #ifdef CONFIG_PPC_FPU BEGIN_FTR_SECTION /* * Certain Freescale cores don't have a FPU and treat fp instructions * as a FP Unavailable exception. Redirect to illegal/emulation handling. 
*/ b ProgramCheck END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE) EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable beq 1f bl load_up_fpu /* if from user, just load it up */ b fast_exception_return 1: prepare_transfer_to_handler bl kernel_fp_unavailable_exception b interrupt_return #else b ProgramCheck #endif /* Decrementer */ EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt) EXCEPTION(0xa00, Trap_0a, unknown_exception) EXCEPTION(0xb00, Trap_0b, unknown_exception) /* System call */ START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall) SYSCALL_ENTRY INTERRUPT_SYSCALL EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception) EXCEPTION(0xe00, Trap_0e, unknown_exception) /* * The Altivec unavailable trap is at 0x0f20. Foo. * We effectively remap it to 0x3000. * We include an altivec unavailable exception vector even if * not configured for Altivec, so that you can't panic a * non-altivec kernel running on a machine with altivec just * by executing an altivec instruction. */ START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap) b PerformanceMonitor START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap) b AltiVecUnavailable __HEAD /* * Handle TLB miss for instruction on 603/603e. * Note: we get an alternate set of r0 - r3 to use automatically. */ . = INTERRUPT_INST_TLB_MISS_603 InstructionTLBMiss: /* * r0: scratch * r1: linux style pte ( later becomes ppc hardware pte ) * r2: ptr to linux-style pte * r3: scratch */ /* Get PTE (linux-style) and check access */ mfspr r3,SPRN_IMISS #ifdef CONFIG_MODULES lis r1, TASK_SIZE@h /* check if kernel address */ cmplw 0,r1,r3 #endif mfspr r2, SPRN_SDR1 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER rlwinm r2, r2, 28, 0xfffff000 #ifdef CONFIG_MODULES bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ #endif 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ rlwinm. r2,r2,0,0,19 /* extract address of pte page */ beq- InstructionAddressInvalid /* return if no mapping */ rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ lwz r0,0(r2) /* get linux-style pte */ andc. r1,r1,r0 /* check access & ~permission */ bne- InstructionAddressInvalid /* return if access not permitted */ /* Convert linux-style PTE to low word of PPC-style PTE */ rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */ ori r1, r1, 0xe06 /* clear out reserved bits */ andc r1, r0, r1 /* PP = user? 1 : 0 */ BEGIN_FTR_SECTION rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mtspr SPRN_RPA,r1 tlbli r3 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r3 rfi InstructionAddressInvalid: mfspr r3,SPRN_SRR1 rlwinm r1,r3,9,6,6 /* Get load/store bit */ addis r1,r1,0x2000 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */ andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ or r2,r2,r1 mtspr SPRN_SRR1,r2 mfspr r1,SPRN_IMISS /* Get failing address */ rlwinm. r2,r2,0,31,31 /* Check for little endian access */ rlwimi r2,r2,1,30,30 /* change 1 -> 3 */ xor r1,r1,r2 mtspr SPRN_DAR,r1 /* Set fault address */ mfmsr r0 /* Restore "normal" registers */ xoris r0,r0,MSR_TGPR>>16 mtcrf 0x80,r3 /* Restore CR0 */ mtmsr r0 b InstructionAccess /* * Handle TLB miss for DATA Load operation on 603/603e */ . 
= INTERRUPT_DATA_LOAD_TLB_MISS_603 DataLoadTLBMiss: /* * r0: scratch * r1: linux style pte ( later becomes ppc hardware pte ) * r2: ptr to linux-style pte * r3: scratch */ /* Get PTE (linux-style) and check access */ mfspr r3,SPRN_DMISS lis r1, TASK_SIZE@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2, SPRN_SDR1 li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER rlwinm r2, r2, 28, 0xfffff000 bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ li r1, _PAGE_PRESENT | _PAGE_ACCESSED addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ rlwinm. r2,r2,0,0,19 /* extract address of pte page */ beq- DataAddressInvalid /* return if no mapping */ rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ lwz r0,0(r2) /* get linux-style pte */ andc. r1,r1,r0 /* check access & ~permission */ bne- DataAddressInvalid /* return if access not permitted */ /* Convert linux-style PTE to low word of PPC-style PTE */ rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */ rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */ rlwimi r1,r0,32-3,24,24 /* _PAGE_RW -> _PAGE_DIRTY */ rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */ xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */ ori r1,r1,0xe04 /* clear out reserved bits */ andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */ BEGIN_FTR_SECTION rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mtspr SPRN_RPA,r1 BEGIN_MMU_FTR_SECTION li r0,1 mfspr r1,SPRN_SPRG_603_LRU rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */ slw r0,r0,r2 xor r1,r0,r1 srw r0,r1,r2 mtspr SPRN_SPRG_603_LRU,r1 mfspr r2,SPRN_SRR1 rlwimi r2,r0,31-14,14,14 mtspr SPRN_SRR1,r2 mtcrf 0x80,r2 tlbld r3 rfi MMU_FTR_SECTION_ELSE mfspr r2,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r2 tlbld r3 rfi ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) DataAddressInvalid: mfspr r3,SPRN_SRR1 rlwinm r1,r3,9,6,6 /* Get load/store bit */ addis r1,r1,0x2000 mtspr SPRN_DSISR,r1 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ mtspr SPRN_SRR1,r2 mfspr r1,SPRN_DMISS /* Get failing address */ rlwinm. r2,r2,0,31,31 /* Check for little endian access */ beq 20f /* Jump if big endian */ xori r1,r1,3 20: mtspr SPRN_DAR,r1 /* Set fault address */ mfmsr r0 /* Restore "normal" registers */ xoris r0,r0,MSR_TGPR>>16 mtcrf 0x80,r3 /* Restore CR0 */ mtmsr r0 b DataAccess /* * Handle TLB miss for DATA Store on 603/603e */ . = INTERRUPT_DATA_STORE_TLB_MISS_603 DataStoreTLBMiss: /* * r0: scratch * r1: linux style pte ( later becomes ppc hardware pte ) * r2: ptr to linux-style pte * r3: scratch */ /* Get PTE (linux-style) and check access */ mfspr r3,SPRN_DMISS lis r1, TASK_SIZE@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2, SPRN_SDR1 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER rlwinm r2, r2, 28, 0xfffff000 bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ rlwinm. r2,r2,0,0,19 /* extract address of pte page */ beq- DataAddressInvalid /* return if no mapping */ rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ lwz r0,0(r2) /* get linux-style pte */ andc. 
r1,r1,r0 /* check access & ~permission */ bne- DataAddressInvalid /* return if access not permitted */ /* Convert linux-style PTE to low word of PPC-style PTE */ rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */ li r1,0xe06 /* clear out reserved bits & PP msb */ andc r1,r0,r1 /* PP = user? 1: 0 */ BEGIN_FTR_SECTION rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mtspr SPRN_RPA,r1 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r2 BEGIN_MMU_FTR_SECTION li r0,1 mfspr r1,SPRN_SPRG_603_LRU rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */ slw r0,r0,r2 xor r1,r0,r1 srw r0,r1,r2 mtspr SPRN_SPRG_603_LRU,r1 mfspr r2,SPRN_SRR1 rlwimi r2,r0,31-14,14,14 mtspr SPRN_SRR1,r2 mtcrf 0x80,r2 tlbld r3 rfi MMU_FTR_SECTION_ELSE mfspr r2,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r2 tlbld r3 rfi ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) #ifndef CONFIG_ALTIVEC #define altivec_assist_exception unknown_exception #endif #ifndef CONFIG_TAU_INT #define TAUException unknown_async_exception #endif EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception) EXCEPTION(0x1400, SMI, SMIException) EXCEPTION(0x1500, Trap_15, unknown_exception) EXCEPTION(0x1600, Trap_16, altivec_assist_exception) EXCEPTION(0x1700, Trap_17, TAUException) EXCEPTION(0x1800, Trap_18, unknown_exception) EXCEPTION(0x1900, Trap_19, unknown_exception) EXCEPTION(0x1a00, Trap_1a, unknown_exception) EXCEPTION(0x1b00, Trap_1b, unknown_exception) EXCEPTION(0x1c00, Trap_1c, unknown_exception) EXCEPTION(0x1d00, Trap_1d, unknown_exception) EXCEPTION(0x1e00, Trap_1e, unknown_exception) EXCEPTION(0x1f00, Trap_1f, unknown_exception) EXCEPTION(0x2000, RunMode, RunModeException) EXCEPTION(0x2100, Trap_21, unknown_exception) EXCEPTION(0x2200, Trap_22, unknown_exception) EXCEPTION(0x2300, Trap_23, unknown_exception) EXCEPTION(0x2400, Trap_24, unknown_exception) EXCEPTION(0x2500, Trap_25, unknown_exception) EXCEPTION(0x2600, Trap_26, unknown_exception) EXCEPTION(0x2700, Trap_27, unknown_exception) EXCEPTION(0x2800, Trap_28, unknown_exception) EXCEPTION(0x2900, Trap_29, unknown_exception) EXCEPTION(0x2a00, Trap_2a, unknown_exception) EXCEPTION(0x2b00, Trap_2b, unknown_exception) EXCEPTION(0x2c00, Trap_2c, unknown_exception) EXCEPTION(0x2d00, Trap_2d, unknown_exception) EXCEPTION(0x2e00, Trap_2e, unknown_exception) EXCEPTION(0x2f00, Trap_2f, unknown_exception) __HEAD . = 0x3000 #ifdef CONFIG_PPC_BOOK3S_604 .macro save_regs_thread thread stw r0, THR0(\thread) stw r3, THR3(\thread) stw r4, THR4(\thread) stw r5, THR5(\thread) stw r6, THR6(\thread) stw r8, THR8(\thread) stw r9, THR9(\thread) mflr r0 stw r0, THLR(\thread) mfctr r0 stw r0, THCTR(\thread) .endm .macro restore_regs_thread thread lwz r0, THLR(\thread) mtlr r0 lwz r0, THCTR(\thread) mtctr r0 lwz r0, THR0(\thread) lwz r3, THR3(\thread) lwz r4, THR4(\thread) lwz r5, THR5(\thread) lwz r6, THR6(\thread) lwz r8, THR8(\thread) lwz r9, THR9(\thread) .endm hash_page_dsi: save_regs_thread r10 mfdsisr r3 mfdar r4 mfsrr0 r5 mfsrr1 r9 rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */ bl hash_page mfspr r10, SPRN_SPRG_THREAD restore_regs_thread r10 b .Lhash_page_dsi_cont hash_page_isi: mr r11, r10 mfspr r10, SPRN_SPRG_THREAD save_regs_thread r10 li r3, 0 lwz r4, SRR0(r10) lwz r9, SRR1(r10) bl hash_page mfspr r10, SPRN_SPRG_THREAD restore_regs_thread r10 mr r10, r11 b .Lhash_page_isi_cont .globl fast_hash_page_return fast_hash_page_return: andis. 
r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */ mfspr r10, SPRN_SPRG_THREAD restore_regs_thread r10 bne 1f /* DSI */ mtcr r11 lwz r11, THR11(r10) mfspr r10, SPRN_SPRG_SCRATCH2 rfi 1: /* ISI */ mtcr r11 mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 rfi #endif /* CONFIG_PPC_BOOK3S_604 */ #ifdef CONFIG_VMAP_STACK vmap_stack_overflow_exception #endif __HEAD AltiVecUnavailable: EXCEPTION_PROLOG 0xf20 AltiVecUnavailable #ifdef CONFIG_ALTIVEC beq 1f bl load_up_altivec /* if from user, just load it up */ b fast_exception_return #endif /* CONFIG_ALTIVEC */ 1: prepare_transfer_to_handler bl altivec_unavailable_exception b interrupt_return __HEAD PerformanceMonitor: EXCEPTION_PROLOG 0xf00 PerformanceMonitor prepare_transfer_to_handler bl performance_monitor_exception b interrupt_return __HEAD /* * This code is jumped to from the startup code to copy * the kernel image to physical address PHYSICAL_START. */ relocate_kernel: lis r3,PHYSICAL_START@h /* Destination base address */ li r6,0 /* Destination offset */ li r5,0x4000 /* # bytes of memory to copy */ bl copy_and_flush /* copy the first 0x4000 bytes */ addi r0,r3,4f@l /* jump to the address of 4f */ mtctr r0 /* in copy and do the rest. */ bctr /* jump to the copy */ 4: lis r5,_end-KERNELBASE@h ori r5,r5,_end-KERNELBASE@l bl copy_and_flush /* copy the rest */ b turn_on_mmu /* * Copy routine used to copy the kernel to start at physical address 0 * and flush and invalidate the caches as needed. * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. */ _GLOBAL(copy_and_flush) addi r5,r5,-4 addi r6,r6,-4 4: li r0,L1_CACHE_BYTES/4 mtctr r0 3: addi r6,r6,4 /* copy a cache line */ lwzx r0,r6,r4 stwx r0,r6,r3 bdnz 3b dcbst r6,r3 /* write it to memory */ sync icbi r6,r3 /* flush the icache line */ cmplw 0,r6,r5 blt 4b sync /* additional sync needed on g4 */ isync addi r5,r5,4 addi r6,r6,4 blr #ifdef CONFIG_SMP .globl __secondary_start_mpc86xx __secondary_start_mpc86xx: mfspr r3, SPRN_PIR stw r3, __secondary_hold_acknowledge@l(0) mr r24, r3 /* cpu # */ b __secondary_start .globl __secondary_start_pmac_0 __secondary_start_pmac_0: /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. 
*/ li r24,0 b 1f li r24,1 b 1f li r24,2 b 1f li r24,3 1: /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0 set to map the 0xf0000000 - 0xffffffff region */ mfmsr r0 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ mtmsr r0 isync .globl __secondary_start __secondary_start: /* Copy some CPU settings from CPU 0 */ bl __restore_cpu_setup lis r3,-KERNELBASE@h mr r4,r24 bl call_setup_cpu /* Call setup_cpu for this CPU */ lis r3,-KERNELBASE@h bl init_idle_6xx /* get current's stack and current */ lis r2,secondary_current@ha tophys(r2,r2) lwz r2,secondary_current@l(r2) tophys(r1,r2) lwz r1,TASK_STACK(r1) /* stack */ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD li r0,0 tophys(r3,r1) stw r0,0(r3) /* load up the MMU */ bl load_segment_registers bl load_up_mmu /* ptr to phys current thread */ tophys(r4,r2) addi r4,r4,THREAD /* phys address of our thread_struct */ mtspr SPRN_SPRG_THREAD,r4 BEGIN_MMU_FTR_SECTION lis r4, (swapper_pg_dir - PAGE_OFFSET)@h ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l rlwinm r4, r4, 4, 0xffff01ff mtspr SPRN_SDR1, r4 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE) /* enable MMU and jump to start_secondary */ li r4,MSR_KERNEL lis r3,start_secondary@h ori r3,r3,start_secondary@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 rfi #endif /* CONFIG_SMP */ #ifdef CONFIG_KVM_BOOK3S_HANDLER #include "../kvm/book3s_rmhandlers.S" #endif /* * Load stuff into the MMU. Intended to be called with * IR=0 and DR=0. */ early_hash_table: sync /* Force all PTE updates to finish */ isync tlbia /* Clear all TLB entries */ sync /* wait for tlbia/tlbie to finish */ TLBSYNC /* ... on all CPUs */ /* Load the SDR1 register (hash table base & size) */ lis r6, early_hash - PAGE_OFFSET@h ori r6, r6, 3 /* 256kB table */ mtspr SPRN_SDR1, r6 blr load_up_mmu: sync /* Force all PTE updates to finish */ isync tlbia /* Clear all TLB entries */ sync /* wait for tlbia/tlbie to finish */ TLBSYNC /* ... on all CPUs */ BEGIN_MMU_FTR_SECTION /* Load the SDR1 register (hash table base & size) */ lis r6,_SDR1@ha tophys(r6,r6) lwz r6,_SDR1@l(r6) mtspr SPRN_SDR1,r6 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) /* Load the BAT registers with the values set up by MMU_init. */ lis r3,BATS@ha addi r3,r3,BATS@l tophys(r3,r3) LOAD_BAT(0,r3,r4,r5) LOAD_BAT(1,r3,r4,r5) LOAD_BAT(2,r3,r4,r5) LOAD_BAT(3,r3,r4,r5) BEGIN_MMU_FTR_SECTION LOAD_BAT(4,r3,r4,r5) LOAD_BAT(5,r3,r4,r5) LOAD_BAT(6,r3,r4,r5) LOAD_BAT(7,r3,r4,r5) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr _GLOBAL(load_segment_registers) li r0, NUM_USER_SEGMENTS /* load up user segment register values */ mtctr r0 /* for context 0 */ #ifdef CONFIG_PPC_KUEP lis r3, SR_NX@h /* Kp = 0, Ks = 0, VSID = 0 */ #else li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */ #endif li r4, 0 3: mtsrin r3, r4 addi r3, r3, 0x111 /* increment VSID */ addis r4, r4, 0x1000 /* address of next segment */ bdnz 3b li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */ mtctr r0 /* for context 0 */ rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */ rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */ oris r3, r3, SR_KP@h /* Kp = 1 */ 3: mtsrin r3, r4 addi r3, r3, 0x111 /* increment VSID */ addis r4, r4, 0x1000 /* address of next segment */ bdnz 3b blr /* * This is where the main kernel code starts. 
*/ start_here: /* ptr to current */ lis r2,init_task@h ori r2,r2,init_task@l /* Set up for using our exception vectors */ /* ptr to phys current thread */ tophys(r4,r2) addi r4,r4,THREAD /* init task's THREAD */ mtspr SPRN_SPRG_THREAD,r4 BEGIN_MMU_FTR_SECTION lis r4, (swapper_pg_dir - PAGE_OFFSET)@h ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l rlwinm r4, r4, 4, 0xffff01ff mtspr SPRN_SDR1, r4 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE) /* stack */ lis r1,init_thread_union@ha addi r1,r1,init_thread_union@l li r0,0 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) /* * Do early platform-specific initialization, * and set up the MMU. */ #ifdef CONFIG_KASAN bl kasan_early_init #endif li r3,0 mr r4,r31 bl machine_init bl __save_cpu_setup bl MMU_init bl MMU_init_hw_patch /* * Go back to running unmapped so we can load up new values * for SDR1 (hash table pointer) and the segment registers * and change to using our exception vectors. */ lis r4,2f@h ori r4,r4,2f@l tophys(r4,r4) li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) .align 4 mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 rfi /* Load up the kernel context */ 2: bl load_up_mmu #ifdef CONFIG_BDI_SWITCH /* Add helper information for the Abatron bdiGDB debugger. * We do this here because we know the mmu is disabled, and * will be enabled for real in just a few instructions. */ lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l stw r5, 0xf0(0) /* This much match your Abatron config */ lis r6, swapper_pg_dir@h ori r6, r6, swapper_pg_dir@l tophys(r5, r5) stw r6, 0(r5) #endif /* CONFIG_BDI_SWITCH */ /* Now turn on the MMU for real! */ li r4,MSR_KERNEL lis r3,start_kernel@h ori r3,r3,start_kernel@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 rfi /* * An undocumented "feature" of 604e requires that the v bit * be cleared before changing BAT values. * * Also, newer IBM firmware does not clear bat3 and 4 so * this makes sure it's done. * -- Cort */ clear_bats: li r10,0 mtspr SPRN_DBAT0U,r10 mtspr SPRN_DBAT0L,r10 mtspr SPRN_DBAT1U,r10 mtspr SPRN_DBAT1L,r10 mtspr SPRN_DBAT2U,r10 mtspr SPRN_DBAT2L,r10 mtspr SPRN_DBAT3U,r10 mtspr SPRN_DBAT3L,r10 mtspr SPRN_IBAT0U,r10 mtspr SPRN_IBAT0L,r10 mtspr SPRN_IBAT1U,r10 mtspr SPRN_IBAT1L,r10 mtspr SPRN_IBAT2U,r10 mtspr SPRN_IBAT2L,r10 mtspr SPRN_IBAT3U,r10 mtspr SPRN_IBAT3L,r10 BEGIN_MMU_FTR_SECTION /* Here's a tweak: at this point, CPU setup have * not been called yet, so HIGH_BAT_EN may not be * set in HID0 for the 745x processors. However, it * seems that doesn't affect our ability to actually * write to these SPRs. 
*/ mtspr SPRN_DBAT4U,r10 mtspr SPRN_DBAT4L,r10 mtspr SPRN_DBAT5U,r10 mtspr SPRN_DBAT5L,r10 mtspr SPRN_DBAT6U,r10 mtspr SPRN_DBAT6L,r10 mtspr SPRN_DBAT7U,r10 mtspr SPRN_DBAT7L,r10 mtspr SPRN_IBAT4U,r10 mtspr SPRN_IBAT4L,r10 mtspr SPRN_IBAT5U,r10 mtspr SPRN_IBAT5L,r10 mtspr SPRN_IBAT6U,r10 mtspr SPRN_IBAT6L,r10 mtspr SPRN_IBAT7U,r10 mtspr SPRN_IBAT7L,r10 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr _GLOBAL(update_bats) lis r4, 1f@h ori r4, r4, 1f@l tophys(r4, r4) mfmsr r6 mflr r7 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR) rlwinm r0, r6, 0, ~MSR_RI rlwinm r0, r0, 0, ~MSR_EE mtmsr r0 .align 4 mtspr SPRN_SRR0, r4 mtspr SPRN_SRR1, r3 rfi 1: bl clear_bats lis r3, BATS@ha addi r3, r3, BATS@l tophys(r3, r3) LOAD_BAT(0, r3, r4, r5) LOAD_BAT(1, r3, r4, r5) LOAD_BAT(2, r3, r4, r5) LOAD_BAT(3, r3, r4, r5) BEGIN_MMU_FTR_SECTION LOAD_BAT(4, r3, r4, r5) LOAD_BAT(5, r3, r4, r5) LOAD_BAT(6, r3, r4, r5) LOAD_BAT(7, r3, r4, r5) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI) mtmsr r3 mtspr SPRN_SRR0, r7 mtspr SPRN_SRR1, r6 rfi flush_tlbs: lis r10, 0x40 1: addic. r10, r10, -0x1000 tlbie r10 bgt 1b sync blr mmu_off: addi r4, r3, __after_mmu_off - _start mfmsr r3 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ beqlr andc r3,r3,r0 .align 4 mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 sync rfi /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */ initial_bats: lis r11,PAGE_OFFSET@h tophys(r8,r11) #ifdef CONFIG_SMP ori r8,r8,0x12 /* R/W access, M=1 */ #else ori r8,r8,2 /* R/W access */ #endif /* CONFIG_SMP */ ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ mtspr SPRN_DBAT0L,r8 /* N.B. 6xx have valid */ mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */ mtspr SPRN_IBAT0L,r8 mtspr SPRN_IBAT0U,r11 isync blr #ifdef CONFIG_BOOTX_TEXT setup_disp_bat: /* * setup the display bat prepared for us in prom.c */ mflr r8 bl reloc_offset mtlr r8 addis r8,r3,disp_BAT@ha addi r8,r8,disp_BAT@l cmpwi cr0,r8,0 beqlr lwz r11,0(r8) lwz r8,4(r8) mtspr SPRN_DBAT3L,r8 mtspr SPRN_DBAT3U,r11 blr #endif /* CONFIG_BOOTX_TEXT */ #ifdef CONFIG_PPC_EARLY_DEBUG_CPM setup_cpm_bat: lis r8, 0xf000 ori r8, r8, 0x002a mtspr SPRN_DBAT1L, r8 lis r11, 0xf000 ori r11, r11, (BL_1M << 2) | 2 mtspr SPRN_DBAT1U, r11 blr #endif #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO setup_usbgecko_bat: /* prepare a BAT for early io */ #if defined(CONFIG_GAMECUBE) lis r8, 0x0c00 #elif defined(CONFIG_WII) lis r8, 0x0d00 #else #error Invalid platform for USB Gecko based early debugging. #endif /* * The virtual address used must match the virtual address * associated to the fixmap entry FIX_EARLY_DEBUG_BASE. */ lis r11, 0xfffe /* top 128K */ ori r8, r8, 0x002a /* uncached, guarded ,rw */ ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */ mtspr SPRN_DBAT1L, r8 mtspr SPRN_DBAT1U, r11 blr #endif .data
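The three 603 TLB-miss handlers in this file all perform the same two-level table walk via rlwimi bit insertions. A hedged C equivalent of the lookup and of the 'andc.' permission test (physical addressing and the PTE flag details are elided; names are descriptive, not kernel definitions):

#include <stdint.h>

/* Two-level walk: 'rlwimi r2,r3,12,20,29' indexes the page directory
 * with the top 10 EA bits; 'rlwimi r2,r3,22,20,29' indexes the PTE
 * page with the next 10 bits. */
static uint32_t *find_pte_model(uint32_t *pgdir, uint32_t ea)
{
    uint32_t pmd = pgdir[ea >> 22];         /* get pmd entry */
    uint32_t pte_page = pmd & 0xfffff000;   /* address of PTE page */
    if (!pte_page)
        return 0;                           /* no mapping: take the fault */
    return (uint32_t *)(uintptr_t)pte_page + ((ea >> 12) & 0x3ff);
}

/* 'andc. r1,r1,r0' leaves exactly the required-but-missing permission
 * bits set; any nonzero result branches to the AddressInvalid path. */
static int access_permitted(uint32_t pte, uint32_t required)
{
    return (required & ~pte) == 0;
}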
aixcc-public/challenge-001-exemplar-source
5,709
arch/powerpc/kernel/idle_book3s.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2018, IBM Corporation. * * This file contains general idle entry/exit functions to save * and restore stack and NVGPRs which allows C code to call idle * states that lose GPRs, and it will return transparently with * SRR1 wakeup reason return value. * * The platform / CPU caller must ensure SPRs and any other non-GPR * state is saved and restored correctly, handle KVM, interrupts, etc. */ #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ppc-opcode.h> #include <asm/cpuidle.h> #include <asm/thread_info.h> /* TLF_NAPPING */ #ifdef CONFIG_PPC_P7_NAP /* * Desired PSSCR in r3 * * No state will be lost regardless of wakeup mechanism (interrupt or NIA). * * An EC=0 type wakeup will return with a value of 0. SRESET wakeup (which can * happen with xscom SRESET and possibly MCE) may clobber volatiles except LR, * and must blr, to return to caller with r3 set according to caller's expected * return code (for Book3S/64 that is SRR1). */ _GLOBAL(isa300_idle_stop_noloss) mtspr SPRN_PSSCR,r3 PPC_STOP li r3,0 blr /* * Desired PSSCR in r3 * * GPRs may be lost, so they are saved here. Wakeup is by interrupt only. * The SRESET wakeup returns to this function's caller by calling * idle_return_gpr_loss with r3 set to desired return value. * * A wakeup without GPR loss may alteratively be handled as in * isa300_idle_stop_noloss and blr directly, as an optimisation. * * The caller is responsible for saving/restoring SPRs, MSR, timebase, * etc. */ _GLOBAL(isa300_idle_stop_mayloss) mtspr SPRN_PSSCR,r3 std r1,PACAR1(r13) mflr r4 mfcr r5 /* * Use the stack red zone rather than a new frame for saving regs since * in the case of no GPR loss the wakeup code branches directly back to * the caller without deallocating the stack frame first. */ std r2,-8*1(r1) std r14,-8*2(r1) std r15,-8*3(r1) std r16,-8*4(r1) std r17,-8*5(r1) std r18,-8*6(r1) std r19,-8*7(r1) std r20,-8*8(r1) std r21,-8*9(r1) std r22,-8*10(r1) std r23,-8*11(r1) std r24,-8*12(r1) std r25,-8*13(r1) std r26,-8*14(r1) std r27,-8*15(r1) std r28,-8*16(r1) std r29,-8*17(r1) std r30,-8*18(r1) std r31,-8*19(r1) std r4,-8*20(r1) std r5,-8*21(r1) /* 168 bytes */ PPC_STOP b . /* catch bugs */ /* * Desired return value in r3 * * The idle wakeup SRESET interrupt can call this after calling * to return to the idle sleep function caller with r3 as the return code. * * This must not be used if idle was entered via a _noloss function (use * a simple blr instead). */ _GLOBAL(idle_return_gpr_loss) ld r1,PACAR1(r13) ld r4,-8*20(r1) ld r5,-8*21(r1) mtlr r4 mtcr r5 /* * KVM nap requires r2 to be saved, rather than just restoring it * from PACATOC. This could be avoided for that less common case * if KVM saved its r2. */ ld r2,-8*1(r1) ld r14,-8*2(r1) ld r15,-8*3(r1) ld r16,-8*4(r1) ld r17,-8*5(r1) ld r18,-8*6(r1) ld r19,-8*7(r1) ld r20,-8*8(r1) ld r21,-8*9(r1) ld r22,-8*10(r1) ld r23,-8*11(r1) ld r24,-8*12(r1) ld r25,-8*13(r1) ld r26,-8*14(r1) ld r27,-8*15(r1) ld r28,-8*16(r1) ld r29,-8*17(r1) ld r30,-8*18(r1) ld r31,-8*19(r1) blr /* * This is the sequence required to execute idle instructions, as * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0. * We have to store a GPR somewhere, ptesync, then reload it, and create * a false dependency on the result of the load. It doesn't matter which * GPR we store, or where we store it. We have already stored r2 to the * stack at -8(r1) in isa206_idle_insn_mayloss, so use that. 
*/ #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ std r2,-8(r1); \ ptesync; \ ld r2,-8(r1); \ 236: cmpd cr0,r2,r2; \ bne 236b; \ IDLE_INST; \ b . /* catch bugs */ /* * Desired instruction type in r3 * * GPRs may be lost, so they are saved here. Wakeup is by interrupt only. * The SRESET wakeup returns to this function's caller by calling * idle_return_gpr_loss with r3 set to desired return value. * * A wakeup without GPR loss may alteratively be handled as in * isa300_idle_stop_noloss and blr directly, as an optimisation. * * The caller is responsible for saving/restoring SPRs, MSR, timebase, * etc. * * This must be called in real-mode (MSR_IDLE). */ _GLOBAL(isa206_idle_insn_mayloss) std r1,PACAR1(r13) mflr r4 mfcr r5 /* * Use the stack red zone rather than a new frame for saving regs since * in the case of no GPR loss the wakeup code branches directly back to * the caller without deallocating the stack frame first. */ std r2,-8*1(r1) std r14,-8*2(r1) std r15,-8*3(r1) std r16,-8*4(r1) std r17,-8*5(r1) std r18,-8*6(r1) std r19,-8*7(r1) std r20,-8*8(r1) std r21,-8*9(r1) std r22,-8*10(r1) std r23,-8*11(r1) std r24,-8*12(r1) std r25,-8*13(r1) std r26,-8*14(r1) std r27,-8*15(r1) std r28,-8*16(r1) std r29,-8*17(r1) std r30,-8*18(r1) std r31,-8*19(r1) std r4,-8*20(r1) std r5,-8*21(r1) cmpwi r3,PNV_THREAD_NAP bne 1f IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP) 1: cmpwi r3,PNV_THREAD_SLEEP bne 2f IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP) 2: IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE) #endif #ifdef CONFIG_PPC_970_NAP _GLOBAL(power4_idle_nap) LOAD_REG_IMMEDIATE(r7, MSR_KERNEL|MSR_EE|MSR_POW) ld r9,PACA_THREAD_INFO(r13) ld r8,TI_LOCAL_FLAGS(r9) ori r8,r8,_TLF_NAPPING std r8,TI_LOCAL_FLAGS(r9) /* * NAPPING bit is set, from this point onward power4_fixup_nap * will cause exceptions to return to power4_idle_nap_return. */ 1: sync isync mtmsrd r7 isync b 1b .globl power4_idle_nap_return power4_idle_nap_return: blr #endif
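The mayloss entry points above save 21 doublewords into the stack red zone rather than allocating a frame, so a no-GPR-loss wakeup can branch straight back to the caller without deallocating anything. A C view of that layout as laid down by the std sequence (struct and field names are descriptive, not kernel definitions):

/* Red-zone save area below r1: CR at r1-8*21, LR at r1-8*20,
 * r31..r14 at r1-8*19 .. r1-8*2, and r2 (TOC) at r1-8*1. */
struct idle_redzone_save {
    unsigned long cr;        /* r1 - 8*21 */
    unsigned long lr;        /* r1 - 8*20 */
    unsigned long gpr[18];   /* r31 at r1 - 8*19 ... r14 at r1 - 8*2 */
    unsigned long r2;        /* r1 - 8*1; the KVM nap path needs this */
};

_Static_assert(sizeof(struct idle_redzone_save) == 168,
               "matches the '168 bytes' note in the source");

idle_return_gpr_loss simply reloads r1 from PACAR1(r13) and walks the same slots in reverse, which is why it must not be used after a _noloss entry that never populated them.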
aixcc-public/challenge-001-exemplar-source
7,450
arch/powerpc/kernel/misc_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains miscellaneous low-level functions. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) * and Paul Mackerras. * */ #include <linux/sys.h> #include <asm/unistd.h> #include <asm/errno.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/cache.h> #include <asm/cputable.h> #include <asm/mmu.h> #include <asm/ppc_asm.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm/processor.h> #include <asm/bug.h> #include <asm/ptrace.h> #include <asm/export.h> #include <asm/feature-fixups.h> .text /* * This returns the high 64 bits of the product of two 64-bit numbers. */ _GLOBAL(mulhdu) cmpwi r6,0 cmpwi cr1,r3,0 mr r10,r4 mulhwu r4,r4,r5 beq 1f mulhwu r0,r10,r6 mullw r7,r10,r5 addc r7,r0,r7 addze r4,r4 1: beqlr cr1 /* all done if high part of A is 0 */ mullw r9,r3,r5 mulhwu r10,r3,r5 beq 2f mullw r0,r3,r6 mulhwu r8,r3,r6 addc r7,r0,r7 adde r4,r4,r8 addze r10,r10 2: addc r4,r4,r9 addze r3,r10 blr /* * reloc_got2 runs through the .got2 section adding an offset * to each entry. */ _GLOBAL(reloc_got2) mflr r11 lis r7,__got2_start@ha addi r7,r7,__got2_start@l lis r8,__got2_end@ha addi r8,r8,__got2_end@l subf r8,r7,r8 srwi. r8,r8,2 beqlr mtctr r8 bcl 20,31,$+4 1: mflr r0 lis r4,1b@ha addi r4,r4,1b@l subf r0,r4,r0 add r7,r0,r7 2: lwz r0,0(r7) add r0,r0,r3 stw r0,0(r7) addi r7,r7,4 bdnz 2b mtlr r11 blr /* * call_setup_cpu - call the setup_cpu function for this cpu * r3 = data offset, r24 = cpu number * * Setup function is called with: * r3 = data offset * r4 = ptr to CPU spec (relocated) */ _GLOBAL(call_setup_cpu) addis r4,r3,cur_cpu_spec@ha addi r4,r4,cur_cpu_spec@l lwz r4,0(r4) add r4,r4,r3 lwz r5,CPU_SPEC_SETUP(r4) cmpwi 0,r5,0 add r5,r5,r3 beqlr mtctr r5 bctr #if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32) /* This gets called by via-pmu.c to switch the PLL selection * on 750fx CPU. This function should really be moved to some * other place (as most of the cpufreq code in via-pmu */ _GLOBAL(low_choose_750fx_pll) /* Clear MSR:EE */ mfmsr r7 rlwinm r0,r7,0,17,15 mtmsr r0 /* If switching to PLL1, disable HID0:BTIC */ cmplwi cr0,r3,0 beq 1f mfspr r5,SPRN_HID0 rlwinm r5,r5,0,27,25 sync mtspr SPRN_HID0,r5 isync sync 1: /* Calc new HID1 value */ mfspr r4,SPRN_HID1 /* Build a HID1:PS bit from parameter */ rlwinm r5,r3,16,15,15 /* Clear out HID1:PS from value read */ rlwinm r4,r4,0,16,14 /* Could have I used rlwimi here ? 
*/ or r4,r4,r5 mtspr SPRN_HID1,r4 #ifdef CONFIG_SMP /* Store new HID1 image */ lwz r6,TASK_CPU(r2) slwi r6,r6,2 #else li r6, 0 #endif addis r6,r6,nap_save_hid1@ha stw r4,nap_save_hid1@l(r6) /* If switching to PLL0, enable HID0:BTIC */ cmplwi cr0,r3,0 bne 1f mfspr r5,SPRN_HID0 ori r5,r5,HID0_BTIC sync mtspr SPRN_HID0,r5 isync sync 1: /* Return */ mtmsr r7 blr _GLOBAL(low_choose_7447a_dfs) /* Clear MSR:EE */ mfmsr r7 rlwinm r0,r7,0,17,15 mtmsr r0 /* Calc new HID1 value */ mfspr r4,SPRN_HID1 insrwi r4,r3,1,9 /* insert parameter into bit 9 */ sync mtspr SPRN_HID1,r4 sync isync /* Return */ mtmsr r7 blr #endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */ #ifdef CONFIG_40x /* * Do an IO access in real mode */ _GLOBAL(real_readb) mfmsr r7 rlwinm r0,r7,0,~MSR_DR sync mtmsr r0 sync isync lbz r3,0(r3) sync mtmsr r7 sync isync blr _ASM_NOKPROBE_SYMBOL(real_readb) /* * Do an IO access in real mode */ _GLOBAL(real_writeb) mfmsr r7 rlwinm r0,r7,0,~MSR_DR sync mtmsr r0 sync isync stb r3,0(r4) sync mtmsr r7 sync isync blr _ASM_NOKPROBE_SYMBOL(real_writeb) #endif /* CONFIG_40x */ /* * Copy a whole page. We use the dcbz instruction on the destination * to reduce memory traffic (it eliminates the unnecessary reads of * the destination into cache). This requires that the destination * is cacheable. */ #define COPY_16_BYTES \ lwz r6,4(r4); \ lwz r7,8(r4); \ lwz r8,12(r4); \ lwzu r9,16(r4); \ stw r6,4(r3); \ stw r7,8(r3); \ stw r8,12(r3); \ stwu r9,16(r3) _GLOBAL(copy_page) rlwinm r5, r3, 0, L1_CACHE_BYTES - 1 addi r3,r3,-4 0: twnei r5, 0 /* WARN if r3 is not cache aligned */ EMIT_WARN_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING addi r4,r4,-4 li r5,4 #if MAX_COPY_PREFETCH > 1 li r0,MAX_COPY_PREFETCH li r11,4 mtctr r0 11: dcbt r11,r4 addi r11,r11,L1_CACHE_BYTES bdnz 11b #else /* MAX_COPY_PREFETCH == 1 */ dcbt r5,r4 li r11,L1_CACHE_BYTES+4 #endif /* MAX_COPY_PREFETCH */ li r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH crclr 4*cr0+eq 2: mtctr r0 1: dcbt r11,r4 dcbz r5,r3 COPY_16_BYTES #if L1_CACHE_BYTES >= 32 COPY_16_BYTES #if L1_CACHE_BYTES >= 64 COPY_16_BYTES COPY_16_BYTES #if L1_CACHE_BYTES >= 128 COPY_16_BYTES COPY_16_BYTES COPY_16_BYTES COPY_16_BYTES #endif #endif #endif bdnz 1b beqlr crnot 4*cr0+eq,4*cr0+eq li r0,MAX_COPY_PREFETCH li r11,4 b 2b EXPORT_SYMBOL(copy_page) /* * Extended precision shifts. * * Updated to be valid for shift counts from 0 to 63 inclusive. * -- Gabriel * * R3/R4 has 64 bit value * R5 has shift count * result in R3/R4 * * ashrdi3: arithmetic right shift (sign propagation) * lshrdi3: logical right shift * ashldi3: left shift */ _GLOBAL(__ashrdi3) subfic r6,r5,32 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count addi r7,r5,32 # could be xori, or addi with -32 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0 sraw r7,r3,r7 # t2 = MSW >> (count-32) or r4,r4,r6 # LSW |= t1 slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2 sraw r3,r3,r5 # MSW = MSW >> count or r4,r4,r7 # LSW |= t2 blr EXPORT_SYMBOL(__ashrdi3) _GLOBAL(__ashldi3) subfic r6,r5,32 slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count addi r7,r5,32 # could be xori, or addi with -32 srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count) slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32) or r3,r3,r6 # MSW |= t1 slw r4,r4,r5 # LSW = LSW << count or r3,r3,r7 # MSW |= t2 blr EXPORT_SYMBOL(__ashldi3) _GLOBAL(__lshrdi3) subfic r6,r5,32 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count addi r7,r5,32 # could be xori, or addi with -32 slw r6,r3,r6 # t1 = count > 31 ? 
0 : MSW << (32-count) srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32) or r4,r4,r6 # LSW |= t1 srw r3,r3,r5 # MSW = MSW >> count or r4,r4,r7 # LSW |= t2 blr EXPORT_SYMBOL(__lshrdi3) /* * 64-bit comparison: __cmpdi2(s64 a, s64 b) * Returns 0 if a < b, 1 if a == b, 2 if a > b. */ _GLOBAL(__cmpdi2) cmpw r3,r5 li r3,1 bne 1f cmplw r4,r6 beqlr 1: li r3,0 bltlr li r3,2 blr EXPORT_SYMBOL(__cmpdi2) /* * 64-bit comparison: __ucmpdi2(u64 a, u64 b) * Returns 0 if a < b, 1 if a == b, 2 if a > b. */ _GLOBAL(__ucmpdi2) cmplw r3,r5 li r3,1 bne 1f cmplw r4,r6 beqlr 1: li r3,0 bltlr li r3,2 blr EXPORT_SYMBOL(__ucmpdi2) _GLOBAL(__bswapdi2) rotlwi r9,r4,8 rotlwi r10,r3,8 rlwimi r9,r4,24,0,7 rlwimi r10,r3,24,0,7 rlwimi r9,r4,24,16,23 rlwimi r10,r3,24,16,23 mr r3,r9 mr r4,r10 blr EXPORT_SYMBOL(__bswapdi2) #ifdef CONFIG_SMP _GLOBAL(start_secondary_resume) /* Reset stack */ rlwinm r1, r1, 0, 0, 31 - THREAD_SHIFT addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD li r3,0 stw r3,0(r1) /* Zero the stack frame pointer */ bl start_secondary b . #endif /* CONFIG_SMP */
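As a cross-check on the extended-precision shift helpers, here is a C reference for __lshrdi3 written from the MSW/LSW comments above. lshrdi3_ref is an illustrative name, not a kernel symbol, and the explicit branches on the count stand in for the PowerPC slw/srw semantics (shift amounts of 32 or more yield zero) that the asm exploits branch-free:

#include <stdint.h>
#include <stdio.h>

static uint64_t lshrdi3_ref(uint64_t v, unsigned int count)
{
	uint32_t msw = (uint32_t)(v >> 32), lsw = (uint32_t)v;
	uint32_t hi, lo;

	if (count == 0) {		/* avoid UB from 32-bit shifts by 32 */
		hi = msw;
		lo = lsw;
	} else if (count < 32) {
		lo = (lsw >> count) | (msw << (32 - count));	/* LSW |= t1 */
		hi = msw >> count;				/* MSW >> count */
	} else {
		lo = msw >> (count - 32);			/* t2 path */
		hi = 0;
	}
	return (uint64_t)hi << 32 | lo;
}

int main(void)
{
	uint64_t v = 0x8000000000000001ull;

	/* Valid for counts 0..63, like the asm. */
	for (unsigned int c = 0; c < 64; c++)
		if (lshrdi3_ref(v, c) != v >> c)
			printf("mismatch at count %u\n", c);
	return 0;
}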
aixcc-public/challenge-001-exemplar-source
10,069
arch/powerpc/crypto/sha1-spe-asm.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Fast SHA-1 implementation for SPE instruction set (PPC) * * This code makes use of the SPE SIMD instruction set as defined in * http://cache.freescale.com/files/32bit/doc/ref_manual/SPEPIM.pdf * Implementation is based on optimization guide notes from * http://cache.freescale.com/files/32bit/doc/app_note/AN2665.pdf * * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> */ #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #define rHP r3 /* pointer to hash value */ #define rWP r4 /* pointer to input */ #define rKP r5 /* pointer to constants */ #define rW0 r14 /* 64 bit round words */ #define rW1 r15 #define rW2 r16 #define rW3 r17 #define rW4 r18 #define rW5 r19 #define rW6 r20 #define rW7 r21 #define rH0 r6 /* 32 bit hash values */ #define rH1 r7 #define rH2 r8 #define rH3 r9 #define rH4 r10 #define rT0 r22 /* 64 bit temporary */ #define rT1 r0 /* 32 bit temporaries */ #define rT2 r11 #define rT3 r12 #define rK r23 /* 64 bit constant in volatile register */ #define LOAD_K01 #define LOAD_K11 \ evlwwsplat rK,0(rKP); #define LOAD_K21 \ evlwwsplat rK,4(rKP); #define LOAD_K31 \ evlwwsplat rK,8(rKP); #define LOAD_K41 \ evlwwsplat rK,12(rKP); #define INITIALIZE \ stwu r1,-128(r1); /* create stack frame */ \ evstdw r14,8(r1); /* We must save non volatile */ \ evstdw r15,16(r1); /* registers. Take the chance */ \ evstdw r16,24(r1); /* and save the SPE part too */ \ evstdw r17,32(r1); \ evstdw r18,40(r1); \ evstdw r19,48(r1); \ evstdw r20,56(r1); \ evstdw r21,64(r1); \ evstdw r22,72(r1); \ evstdw r23,80(r1); #define FINALIZE \ evldw r14,8(r1); /* restore SPE registers */ \ evldw r15,16(r1); \ evldw r16,24(r1); \ evldw r17,32(r1); \ evldw r18,40(r1); \ evldw r19,48(r1); \ evldw r20,56(r1); \ evldw r21,64(r1); \ evldw r22,72(r1); \ evldw r23,80(r1); \ xor r0,r0,r0; \ stw r0,8(r1); /* Delete sensitive data */ \ stw r0,16(r1); /* that we might have pushed */ \ stw r0,24(r1); /* from other context that runs */ \ stw r0,32(r1); /* the same code. 
Assume that */ \ stw r0,40(r1); /* the lower part of the GPRs */ \ stw r0,48(r1); /* were already overwritten on */ \ stw r0,56(r1); /* the way down to here */ \ stw r0,64(r1); \ stw r0,72(r1); \ stw r0,80(r1); \ addi r1,r1,128; /* cleanup stack frame */ #ifdef __BIG_ENDIAN__ #define LOAD_DATA(reg, off) \ lwz reg,off(rWP); /* load data */ #define NEXT_BLOCK \ addi rWP,rWP,64; /* increment per block */ #else #define LOAD_DATA(reg, off) \ lwbrx reg,0,rWP; /* load data */ \ addi rWP,rWP,4; /* increment per word */ #define NEXT_BLOCK /* nothing to do */ #endif #define R_00_15(a, b, c, d, e, w0, w1, k, off) \ LOAD_DATA(w0, off) /* 1: W */ \ and rT2,b,c; /* 1: F' = B and C */ \ LOAD_K##k##1 \ andc rT1,d,b; /* 1: F" = ~B and D */ \ rotrwi rT0,a,27; /* 1: A' = A rotl 5 */ \ or rT2,rT2,rT1; /* 1: F = F' or F" */ \ add e,e,rT0; /* 1: E = E + A' */ \ rotrwi b,b,2; /* 1: B = B rotl 30 */ \ add e,e,w0; /* 1: E = E + W */ \ LOAD_DATA(w1, off+4) /* 2: W */ \ add e,e,rT2; /* 1: E = E + F */ \ and rT1,a,b; /* 2: F' = B and C */ \ add e,e,rK; /* 1: E = E + K */ \ andc rT2,c,a; /* 2: F" = ~B and D */ \ add d,d,rK; /* 2: E = E + K */ \ or rT2,rT2,rT1; /* 2: F = F' or F" */ \ rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \ add d,d,w1; /* 2: E = E + W */ \ rotrwi a,a,2; /* 2: B = B rotl 30 */ \ add d,d,rT0; /* 2: E = E + A' */ \ evmergelo w1,w1,w0; /* mix W[0]/W[1] */ \ add d,d,rT2 /* 2: E = E + F */ #define R_16_19(a, b, c, d, e, w0, w1, w4, w6, w7, k) \ and rT2,b,c; /* 1: F' = B and C */ \ evmergelohi rT0,w7,w6; /* W[-3] */ \ andc rT1,d,b; /* 1: F" = ~B and D */ \ evxor w0,w0,rT0; /* W = W[-16] xor W[-3] */ \ or rT1,rT1,rT2; /* 1: F = F' or F" */ \ evxor w0,w0,w4; /* W = W xor W[-8] */ \ add e,e,rT1; /* 1: E = E + F */ \ evxor w0,w0,w1; /* W = W xor W[-14] */ \ rotrwi rT2,a,27; /* 1: A' = A rotl 5 */ \ evrlwi w0,w0,1; /* W = W rotl 1 */ \ add e,e,rT2; /* 1: E = E + A' */ \ evaddw rT0,w0,rK; /* WK = W + K */ \ rotrwi b,b,2; /* 1: B = B rotl 30 */ \ LOAD_K##k##1 \ evmergehi rT1,rT1,rT0; /* WK1/WK2 */ \ add e,e,rT0; /* 1: E = E + WK */ \ add d,d,rT1; /* 2: E = E + WK */ \ and rT2,a,b; /* 2: F' = B and C */ \ andc rT1,c,a; /* 2: F" = ~B and D */ \ rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \ or rT1,rT1,rT2; /* 2: F = F' or F" */ \ add d,d,rT0; /* 2: E = E + A' */ \ rotrwi a,a,2; /* 2: B = B rotl 30 */ \ add d,d,rT1 /* 2: E = E + F */ #define R_20_39(a, b, c, d, e, w0, w1, w4, w6, w7, k) \ evmergelohi rT0,w7,w6; /* W[-3] */ \ xor rT2,b,c; /* 1: F' = B xor C */ \ evxor w0,w0,rT0; /* W = W[-16] xor W[-3] */ \ xor rT2,rT2,d; /* 1: F = F' xor D */ \ evxor w0,w0,w4; /* W = W xor W[-8] */ \ add e,e,rT2; /* 1: E = E + F */ \ evxor w0,w0,w1; /* W = W xor W[-14] */ \ rotrwi rT2,a,27; /* 1: A' = A rotl 5 */ \ evrlwi w0,w0,1; /* W = W rotl 1 */ \ add e,e,rT2; /* 1: E = E + A' */ \ evaddw rT0,w0,rK; /* WK = W + K */ \ rotrwi b,b,2; /* 1: B = B rotl 30 */ \ LOAD_K##k##1 \ evmergehi rT1,rT1,rT0; /* WK1/WK2 */ \ add e,e,rT0; /* 1: E = E + WK */ \ xor rT2,a,b; /* 2: F' = B xor C */ \ add d,d,rT1; /* 2: E = E + WK */ \ xor rT2,rT2,c; /* 2: F = F' xor D */ \ rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \ add d,d,rT2; /* 2: E = E + F */ \ rotrwi a,a,2; /* 2: B = B rotl 30 */ \ add d,d,rT0 /* 2: E = E + A' */ #define R_40_59(a, b, c, d, e, w0, w1, w4, w6, w7, k) \ and rT2,b,c; /* 1: F' = B and C */ \ evmergelohi rT0,w7,w6; /* W[-3] */ \ or rT1,b,c; /* 1: F" = B or C */ \ evxor w0,w0,rT0; /* W = W[-16] xor W[-3] */ \ and rT1,d,rT1; /* 1: F" = F" and D */ \ evxor w0,w0,w4; /* W = W xor W[-8] */ \ or rT2,rT2,rT1; /* 1: F = F' or F" */ \ evxor 
w0,w0,w1; /* W = W xor W[-14] */ \ add e,e,rT2; /* 1: E = E + F */ \ evrlwi w0,w0,1; /* W = W rotl 1 */ \ rotrwi rT2,a,27; /* 1: A' = A rotl 5 */ \ evaddw rT0,w0,rK; /* WK = W + K */ \ add e,e,rT2; /* 1: E = E + A' */ \ LOAD_K##k##1 \ evmergehi rT1,rT1,rT0; /* WK1/WK2 */ \ rotrwi b,b,2; /* 1: B = B rotl 30 */ \ add e,e,rT0; /* 1: E = E + WK */ \ and rT2,a,b; /* 2: F' = B and C */ \ or rT0,a,b; /* 2: F" = B or C */ \ add d,d,rT1; /* 2: E = E + WK */ \ and rT0,c,rT0; /* 2: F" = F" and D */ \ rotrwi a,a,2; /* 2: B = B rotl 30 */ \ or rT2,rT2,rT0; /* 2: F = F' or F" */ \ rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \ add d,d,rT2; /* 2: E = E + F */ \ add d,d,rT0 /* 2: E = E + A' */ #define R_60_79(a, b, c, d, e, w0, w1, w4, w6, w7, k) \ R_20_39(a, b, c, d, e, w0, w1, w4, w6, w7, k) _GLOBAL(ppc_spe_sha1_transform) INITIALIZE lwz rH0,0(rHP) lwz rH1,4(rHP) mtctr r5 lwz rH2,8(rHP) lis rKP,PPC_SPE_SHA1_K@h lwz rH3,12(rHP) ori rKP,rKP,PPC_SPE_SHA1_K@l lwz rH4,16(rHP) ppc_spe_sha1_main: R_00_15(rH0, rH1, rH2, rH3, rH4, rW1, rW0, 1, 0) R_00_15(rH3, rH4, rH0, rH1, rH2, rW2, rW1, 0, 8) R_00_15(rH1, rH2, rH3, rH4, rH0, rW3, rW2, 0, 16) R_00_15(rH4, rH0, rH1, rH2, rH3, rW4, rW3, 0, 24) R_00_15(rH2, rH3, rH4, rH0, rH1, rW5, rW4, 0, 32) R_00_15(rH0, rH1, rH2, rH3, rH4, rW6, rW5, 0, 40) R_00_15(rH3, rH4, rH0, rH1, rH2, rT3, rW6, 0, 48) R_00_15(rH1, rH2, rH3, rH4, rH0, rT3, rW7, 0, 56) R_16_19(rH4, rH0, rH1, rH2, rH3, rW0, rW1, rW4, rW6, rW7, 0) R_16_19(rH2, rH3, rH4, rH0, rH1, rW1, rW2, rW5, rW7, rW0, 2) R_20_39(rH0, rH1, rH2, rH3, rH4, rW2, rW3, rW6, rW0, rW1, 0) R_20_39(rH3, rH4, rH0, rH1, rH2, rW3, rW4, rW7, rW1, rW2, 0) R_20_39(rH1, rH2, rH3, rH4, rH0, rW4, rW5, rW0, rW2, rW3, 0) R_20_39(rH4, rH0, rH1, rH2, rH3, rW5, rW6, rW1, rW3, rW4, 0) R_20_39(rH2, rH3, rH4, rH0, rH1, rW6, rW7, rW2, rW4, rW5, 0) R_20_39(rH0, rH1, rH2, rH3, rH4, rW7, rW0, rW3, rW5, rW6, 0) R_20_39(rH3, rH4, rH0, rH1, rH2, rW0, rW1, rW4, rW6, rW7, 0) R_20_39(rH1, rH2, rH3, rH4, rH0, rW1, rW2, rW5, rW7, rW0, 0) R_20_39(rH4, rH0, rH1, rH2, rH3, rW2, rW3, rW6, rW0, rW1, 0) R_20_39(rH2, rH3, rH4, rH0, rH1, rW3, rW4, rW7, rW1, rW2, 3) R_40_59(rH0, rH1, rH2, rH3, rH4, rW4, rW5, rW0, rW2, rW3, 0) R_40_59(rH3, rH4, rH0, rH1, rH2, rW5, rW6, rW1, rW3, rW4, 0) R_40_59(rH1, rH2, rH3, rH4, rH0, rW6, rW7, rW2, rW4, rW5, 0) R_40_59(rH4, rH0, rH1, rH2, rH3, rW7, rW0, rW3, rW5, rW6, 0) R_40_59(rH2, rH3, rH4, rH0, rH1, rW0, rW1, rW4, rW6, rW7, 0) R_40_59(rH0, rH1, rH2, rH3, rH4, rW1, rW2, rW5, rW7, rW0, 0) R_40_59(rH3, rH4, rH0, rH1, rH2, rW2, rW3, rW6, rW0, rW1, 0) R_40_59(rH1, rH2, rH3, rH4, rH0, rW3, rW4, rW7, rW1, rW2, 0) R_40_59(rH4, rH0, rH1, rH2, rH3, rW4, rW5, rW0, rW2, rW3, 0) R_40_59(rH2, rH3, rH4, rH0, rH1, rW5, rW6, rW1, rW3, rW4, 4) R_60_79(rH0, rH1, rH2, rH3, rH4, rW6, rW7, rW2, rW4, rW5, 0) R_60_79(rH3, rH4, rH0, rH1, rH2, rW7, rW0, rW3, rW5, rW6, 0) R_60_79(rH1, rH2, rH3, rH4, rH0, rW0, rW1, rW4, rW6, rW7, 0) R_60_79(rH4, rH0, rH1, rH2, rH3, rW1, rW2, rW5, rW7, rW0, 0) R_60_79(rH2, rH3, rH4, rH0, rH1, rW2, rW3, rW6, rW0, rW1, 0) R_60_79(rH0, rH1, rH2, rH3, rH4, rW3, rW4, rW7, rW1, rW2, 0) R_60_79(rH3, rH4, rH0, rH1, rH2, rW4, rW5, rW0, rW2, rW3, 0) lwz rT3,0(rHP) R_60_79(rH1, rH2, rH3, rH4, rH0, rW5, rW6, rW1, rW3, rW4, 0) lwz rW1,4(rHP) R_60_79(rH4, rH0, rH1, rH2, rH3, rW6, rW7, rW2, rW4, rW5, 0) lwz rW2,8(rHP) R_60_79(rH2, rH3, rH4, rH0, rH1, rW7, rW0, rW3, rW5, rW6, 0) lwz rW3,12(rHP) NEXT_BLOCK lwz rW4,16(rHP) add rH0,rH0,rT3 stw rH0,0(rHP) add rH1,rH1,rW1 stw rH1,4(rHP) add rH2,rH2,rW2 stw rH2,8(rHP) add rH3,rH3,rW3 stw rH3,12(rHP) add 
rH4,rH4,rW4 stw rH4,16(rHP) bdnz ppc_spe_sha1_main FINALIZE blr .data .align 4 PPC_SPE_SHA1_K: .long 0x5A827999,0x6ED9EBA1,0x8F1BBCDC,0xCA62C1D6
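The comment notation in R_00_15/R_40_59 above (F' = B and C, F" = ~B and D, and so on) maps onto the standard SHA-1 round functions and message schedule. A scalar sketch with illustrative names and none of the two-way SPE interleaving:

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl(uint32_t x, unsigned int n)
{
	return x << n | x >> (32 - n);
}

/* Rounds 0-19: choose, spelled (B and C) or (~B and D) in the asm. */
static uint32_t f_ch(uint32_t b, uint32_t c, uint32_t d)
{
	return (b & c) | (~b & d);
}

/* Rounds 20-39 and 60-79: parity. */
static uint32_t f_parity(uint32_t b, uint32_t c, uint32_t d)
{
	return b ^ c ^ d;
}

/* Rounds 40-59: majority, spelled (B and C) or ((B or C) and D). */
static uint32_t f_maj(uint32_t b, uint32_t c, uint32_t d)
{
	return (b & c) | ((b | c) & d);
}

/* One schedule word (t >= 16), the evxor/evrlwi chain in R_16_19 etc.:
 * W = rotl1(W[-16] ^ W[-14] ^ W[-8] ^ W[-3]). */
static uint32_t w_next(const uint32_t w[16], unsigned int t)
{
	return rotl(w[(t - 16) % 16] ^ w[(t - 14) % 16] ^
		    w[(t - 8) % 16] ^ w[(t - 3) % 16], 1);
}

int main(void)
{
	uint32_t w[16] = { 0x61626364 };	/* toy schedule state */

	printf("ch=%08x parity=%08x maj=%08x w16=%08x\n",
	       f_ch(1, 2, 3), f_parity(1, 2, 3), f_maj(1, 2, 3),
	       w_next(w, 16));
	return 0;
}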
aixcc-public/challenge-001-exemplar-source
6,102
arch/powerpc/crypto/aes-spe-keys.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Key handling functions for PPC AES implementation * * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> */ #include <asm/ppc_asm.h> #ifdef __BIG_ENDIAN__ #define LOAD_KEY(d, s, off) \ lwz d,off(s); #else #define LOAD_KEY(d, s, off) \ li r0,off; \ lwbrx d,s,r0; #endif #define INITIALIZE_KEY \ stwu r1,-32(r1); /* create stack frame */ \ stw r14,8(r1); /* save registers */ \ stw r15,12(r1); \ stw r16,16(r1); #define FINALIZE_KEY \ lwz r14,8(r1); /* restore registers */ \ lwz r15,12(r1); \ lwz r16,16(r1); \ xor r5,r5,r5; /* clear sensitive data */ \ xor r6,r6,r6; \ xor r7,r7,r7; \ xor r8,r8,r8; \ xor r9,r9,r9; \ xor r10,r10,r10; \ xor r11,r11,r11; \ xor r12,r12,r12; \ addi r1,r1,32; /* cleanup stack */ #define LS_BOX(r, t1, t2) \ lis t2,PPC_AES_4K_ENCTAB@h; \ ori t2,t2,PPC_AES_4K_ENCTAB@l; \ rlwimi t2,r,4,20,27; \ lbz t1,8(t2); \ rlwimi r,t1,0,24,31; \ rlwimi t2,r,28,20,27; \ lbz t1,8(t2); \ rlwimi r,t1,8,16,23; \ rlwimi t2,r,20,20,27; \ lbz t1,8(t2); \ rlwimi r,t1,16,8,15; \ rlwimi t2,r,12,20,27; \ lbz t1,8(t2); \ rlwimi r,t1,24,0,7; #define GF8_MUL(out, in, t1, t2) \ lis t1,0x8080; /* multiplication in GF8 */ \ ori t1,t1,0x8080; \ and t1,t1,in; \ srwi t1,t1,7; \ mulli t1,t1,0x1b; \ lis t2,0x7f7f; \ ori t2,t2,0x7f7f; \ and t2,t2,in; \ slwi t2,t2,1; \ xor out,t1,t2; /* * ppc_expand_key_128(u32 *key_enc, const u8 *key) * * Expand 128 bit key into 176 bytes encryption key. It consists of * key itself plus 10 rounds with 16 bytes each * */ _GLOBAL(ppc_expand_key_128) INITIALIZE_KEY LOAD_KEY(r5,r4,0) LOAD_KEY(r6,r4,4) LOAD_KEY(r7,r4,8) LOAD_KEY(r8,r4,12) stw r5,0(r3) /* key[0..3] = input data */ stw r6,4(r3) stw r7,8(r3) stw r8,12(r3) li r16,10 /* 10 expansion rounds */ lis r0,0x0100 /* RCO(1) */ ppc_expand_128_loop: addi r3,r3,16 mr r14,r8 /* apply LS_BOX to 4th temp */ rotlwi r14,r14,8 LS_BOX(r14, r15, r4) xor r14,r14,r0 xor r5,r5,r14 /* xor next 4 keys */ xor r6,r6,r5 xor r7,r7,r6 xor r8,r8,r7 stw r5,0(r3) /* store next 4 keys */ stw r6,4(r3) stw r7,8(r3) stw r8,12(r3) GF8_MUL(r0, r0, r4, r14) /* multiply RCO by 2 in GF */ subi r16,r16,1 cmpwi r16,0 bt eq,ppc_expand_128_end b ppc_expand_128_loop ppc_expand_128_end: FINALIZE_KEY blr /* * ppc_expand_key_192(u32 *key_enc, const u8 *key) * * Expand 192 bit key into 208 bytes encryption key. It consists of key * itself plus 12 rounds with 16 bytes each * */ _GLOBAL(ppc_expand_key_192) INITIALIZE_KEY LOAD_KEY(r5,r4,0) LOAD_KEY(r6,r4,4) LOAD_KEY(r7,r4,8) LOAD_KEY(r8,r4,12) LOAD_KEY(r9,r4,16) LOAD_KEY(r10,r4,20) stw r5,0(r3) stw r6,4(r3) stw r7,8(r3) stw r8,12(r3) stw r9,16(r3) stw r10,20(r3) li r16,8 /* 8 expansion rounds */ lis r0,0x0100 /* RCO(1) */ ppc_expand_192_loop: addi r3,r3,24 mr r14,r10 /* apply LS_BOX to 6th temp */ rotlwi r14,r14,8 LS_BOX(r14, r15, r4) xor r14,r14,r0 xor r5,r5,r14 /* xor next 6 keys */ xor r6,r6,r5 xor r7,r7,r6 xor r8,r8,r7 xor r9,r9,r8 xor r10,r10,r9 stw r5,0(r3) stw r6,4(r3) stw r7,8(r3) stw r8,12(r3) subi r16,r16,1 cmpwi r16,0 /* last round early kick out */ bt eq,ppc_expand_192_end stw r9,16(r3) stw r10,20(r3) GF8_MUL(r0, r0, r4, r14) /* multiply RCO GF8 */ b ppc_expand_192_loop ppc_expand_192_end: FINALIZE_KEY blr /* * ppc_expand_key_256(u32 *key_enc, const u8 *key) * * Expand 256 bit key into 240 bytes encryption key. 
It consists of key * itself plus 14 rounds with 16 bytes each * */ _GLOBAL(ppc_expand_key_256) INITIALIZE_KEY LOAD_KEY(r5,r4,0) LOAD_KEY(r6,r4,4) LOAD_KEY(r7,r4,8) LOAD_KEY(r8,r4,12) LOAD_KEY(r9,r4,16) LOAD_KEY(r10,r4,20) LOAD_KEY(r11,r4,24) LOAD_KEY(r12,r4,28) stw r5,0(r3) stw r6,4(r3) stw r7,8(r3) stw r8,12(r3) stw r9,16(r3) stw r10,20(r3) stw r11,24(r3) stw r12,28(r3) li r16,7 /* 7 expansion rounds */ lis r0,0x0100 /* RCO(1) */ ppc_expand_256_loop: addi r3,r3,32 mr r14,r12 /* apply LS_BOX to 8th temp */ rotlwi r14,r14,8 LS_BOX(r14, r15, r4) xor r14,r14,r0 xor r5,r5,r14 /* xor 4 keys */ xor r6,r6,r5 xor r7,r7,r6 xor r8,r8,r7 mr r14,r8 LS_BOX(r14, r15, r4) /* apply LS_BOX to 4th temp */ xor r9,r9,r14 /* xor 4 keys */ xor r10,r10,r9 xor r11,r11,r10 xor r12,r12,r11 stw r5,0(r3) stw r6,4(r3) stw r7,8(r3) stw r8,12(r3) subi r16,r16,1 cmpwi r16,0 /* last round early kick out */ bt eq,ppc_expand_256_end stw r9,16(r3) stw r10,20(r3) stw r11,24(r3) stw r12,28(r3) GF8_MUL(r0, r0, r4, r14) b ppc_expand_256_loop ppc_expand_256_end: FINALIZE_KEY blr /* * ppc_generate_decrypt_key: derive decryption key from encryption key * number of bytes to handle are calculated from length of key (16/24/32) * */ _GLOBAL(ppc_generate_decrypt_key) addi r6,r5,24 slwi r6,r6,2 lwzx r7,r4,r6 /* first/last 4 words are same */ stw r7,0(r3) lwz r7,0(r4) stwx r7,r3,r6 addi r6,r6,4 lwzx r7,r4,r6 stw r7,4(r3) lwz r7,4(r4) stwx r7,r3,r6 addi r6,r6,4 lwzx r7,r4,r6 stw r7,8(r3) lwz r7,8(r4) stwx r7,r3,r6 addi r6,r6,4 lwzx r7,r4,r6 stw r7,12(r3) lwz r7,12(r4) stwx r7,r3,r6 addi r3,r3,16 add r4,r4,r6 subi r4,r4,28 addi r5,r5,20 srwi r5,r5,2 ppc_generate_decrypt_block: li r6,4 mtctr r6 ppc_generate_decrypt_word: lwz r6,0(r4) GF8_MUL(r7, r6, r0, r7) GF8_MUL(r8, r7, r0, r8) GF8_MUL(r9, r8, r0, r9) xor r10,r9,r6 xor r11,r7,r8 xor r11,r11,r9 xor r12,r7,r10 rotrwi r12,r12,24 xor r11,r11,r12 xor r12,r8,r10 rotrwi r12,r12,16 xor r11,r11,r12 rotrwi r12,r10,8 xor r11,r11,r12 stw r11,0(r3) addi r3,r3,4 addi r4,r4,4 bdnz ppc_generate_decrypt_word subi r4,r4,32 subi r5,r5,1 cmpwi r5,0 bt gt,ppc_generate_decrypt_block blr
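The GF8_MUL macro above multiplies four packed GF(2^8) bytes by x in parallel (the AES "xtime" step) using only masks, shifts and one integer multiply. A C sketch of the same trick; gf8_mul2 is an illustrative name:

#include <stdint.h>
#include <stdio.h>

static uint32_t gf8_mul2(uint32_t in)
{
	uint32_t hi = (in & 0x80808080u) >> 7;	/* top bit of each byte   */
	uint32_t lo = (in & 0x7f7f7f7fu) << 1;	/* shift the low 7 bits   */
	return lo ^ hi * 0x1b;			/* reduce mod x^8+x^4+x^3+x+1 */
}

int main(void)
{
	/* Per byte: xtime(0x80)=0x1b, xtime(0x57)=0xae, 0x02->0x04, 0x01->0x02 */
	printf("%08x\n", gf8_mul2(0x80570201u));	/* expect 1bae0402 */
	return 0;
}

The multiply works because hi holds only a 0 or 1 in the low bit of each byte, so scaling by 0x1b plants the reduction constant per byte with no cross-byte carries — exactly what the mulli in the macro does.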
aixcc-public/challenge-001-exemplar-source
3,915
arch/powerpc/crypto/sha1-powerpc-asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * SHA-1 implementation for PowerPC. * * Copyright (C) 2005 Paul Mackerras <paulus@samba.org> */ #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/asm-compat.h> #ifdef __BIG_ENDIAN__ #define LWZ(rt, d, ra) \ lwz rt,d(ra) #else #define LWZ(rt, d, ra) \ li rt,d; \ lwbrx rt,rt,ra #endif /* * We roll the registers for T, A, B, C, D, E around on each * iteration; T on iteration t is A on iteration t+1, and so on. * We use registers 7 - 12 for this. */ #define RT(t) ((((t)+5)%6)+7) #define RA(t) ((((t)+4)%6)+7) #define RB(t) ((((t)+3)%6)+7) #define RC(t) ((((t)+2)%6)+7) #define RD(t) ((((t)+1)%6)+7) #define RE(t) ((((t)+0)%6)+7) /* We use registers 16 - 31 for the W values */ #define W(t) (((t)%16)+16) #define LOADW(t) \ LWZ(W(t),(t)*4,r4) #define STEPD0_LOAD(t) \ andc r0,RD(t),RB(t); \ and r6,RB(t),RC(t); \ rotlwi RT(t),RA(t),5; \ or r6,r6,r0; \ add r0,RE(t),r15; \ add RT(t),RT(t),r6; \ add r14,r0,W(t); \ LWZ(W((t)+4),((t)+4)*4,r4); \ rotlwi RB(t),RB(t),30; \ add RT(t),RT(t),r14 #define STEPD0_UPDATE(t) \ and r6,RB(t),RC(t); \ andc r0,RD(t),RB(t); \ rotlwi RT(t),RA(t),5; \ rotlwi RB(t),RB(t),30; \ or r6,r6,r0; \ add r0,RE(t),r15; \ xor r5,W((t)+4-3),W((t)+4-8); \ add RT(t),RT(t),r6; \ xor W((t)+4),W((t)+4-16),W((t)+4-14); \ add r0,r0,W(t); \ xor W((t)+4),W((t)+4),r5; \ add RT(t),RT(t),r0; \ rotlwi W((t)+4),W((t)+4),1 #define STEPD1(t) \ xor r6,RB(t),RC(t); \ rotlwi RT(t),RA(t),5; \ rotlwi RB(t),RB(t),30; \ xor r6,r6,RD(t); \ add r0,RE(t),r15; \ add RT(t),RT(t),r6; \ add r0,r0,W(t); \ add RT(t),RT(t),r0 #define STEPD1_UPDATE(t) \ xor r6,RB(t),RC(t); \ rotlwi RT(t),RA(t),5; \ rotlwi RB(t),RB(t),30; \ xor r6,r6,RD(t); \ add r0,RE(t),r15; \ xor r5,W((t)+4-3),W((t)+4-8); \ add RT(t),RT(t),r6; \ xor W((t)+4),W((t)+4-16),W((t)+4-14); \ add r0,r0,W(t); \ xor W((t)+4),W((t)+4),r5; \ add RT(t),RT(t),r0; \ rotlwi W((t)+4),W((t)+4),1 #define STEPD2_UPDATE(t) \ and r6,RB(t),RC(t); \ and r0,RB(t),RD(t); \ rotlwi RT(t),RA(t),5; \ or r6,r6,r0; \ rotlwi RB(t),RB(t),30; \ and r0,RC(t),RD(t); \ xor r5,W((t)+4-3),W((t)+4-8); \ or r6,r6,r0; \ xor W((t)+4),W((t)+4-16),W((t)+4-14); \ add r0,RE(t),r15; \ add RT(t),RT(t),r6; \ add r0,r0,W(t); \ xor W((t)+4),W((t)+4),r5; \ add RT(t),RT(t),r0; \ rotlwi W((t)+4),W((t)+4),1 #define STEP0LD4(t) \ STEPD0_LOAD(t); \ STEPD0_LOAD((t)+1); \ STEPD0_LOAD((t)+2); \ STEPD0_LOAD((t)+3) #define STEPUP4(t, fn) \ STEP##fn##_UPDATE(t); \ STEP##fn##_UPDATE((t)+1); \ STEP##fn##_UPDATE((t)+2); \ STEP##fn##_UPDATE((t)+3) #define STEPUP20(t, fn) \ STEPUP4(t, fn); \ STEPUP4((t)+4, fn); \ STEPUP4((t)+8, fn); \ STEPUP4((t)+12, fn); \ STEPUP4((t)+16, fn) _GLOBAL(powerpc_sha_transform) PPC_STLU r1,-INT_FRAME_SIZE(r1) SAVE_GPRS(14, 31, r1) /* Load up A - E */ lwz RA(0),0(r3) /* A */ lwz RB(0),4(r3) /* B */ lwz RC(0),8(r3) /* C */ lwz RD(0),12(r3) /* D */ lwz RE(0),16(r3) /* E */ LOADW(0) LOADW(1) LOADW(2) LOADW(3) lis r15,0x5a82 /* K0-19 */ ori r15,r15,0x7999 STEP0LD4(0) STEP0LD4(4) STEP0LD4(8) STEPUP4(12, D0) STEPUP4(16, D0) lis r15,0x6ed9 /* K20-39 */ ori r15,r15,0xeba1 STEPUP20(20, D1) lis r15,0x8f1b /* K40-59 */ ori r15,r15,0xbcdc STEPUP20(40, D2) lis r15,0xca62 /* K60-79 */ ori r15,r15,0xc1d6 STEPUP4(60, D1) STEPUP4(64, D1) STEPUP4(68, D1) STEPUP4(72, D1) lwz r20,16(r3) STEPD1(76) lwz r19,12(r3) STEPD1(77) lwz r18,8(r3) STEPD1(78) lwz r17,4(r3) STEPD1(79) lwz r16,0(r3) add r20,RE(80),r20 add RD(0),RD(80),r19 add RC(0),RC(80),r18 add RB(0),RB(80),r17 add RA(0),RA(80),r16 mr RE(0),r20 stw RA(0),0(r3) stw RB(0),4(r3) stw RC(0),8(r3) stw 
RD(0),12(r3) stw RE(0),16(r3) REST_GPRS(14, 31, r1) addi r1,r1,INT_FRAME_SIZE blr
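The RT/RA/.../RE macros implement register renaming in the assembler: T's register at step t becomes A's at step t+1, so the rolled values never need to be moved. The modulo-6 walk can be checked with a quick C program (the macro bodies are copied from above; the rest is illustrative):

#include <stdio.h>

#define RT(t) ((((t) + 5) % 6) + 7)
#define RA(t) ((((t) + 4) % 6) + 7)

int main(void)
{
	for (int t = 0; t < 6; t++)
		printf("t=%d: T in r%d, A in r%d (== T of step t-1)\n",
		       t, RT(t), RA(t));
	return 0;
}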
aixcc-public/challenge-001-exemplar-source
11,199
arch/powerpc/crypto/sha256-spe-asm.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Fast SHA-256 implementation for SPE instruction set (PPC) * * This code makes use of the SPE SIMD instruction set as defined in * http://cache.freescale.com/files/32bit/doc/ref_manual/SPEPIM.pdf * Implementation is based on optimization guide notes from * http://cache.freescale.com/files/32bit/doc/app_note/AN2665.pdf * * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> */ #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #define rHP r3 /* pointer to hash values in memory */ #define rKP r24 /* pointer to round constants */ #define rWP r4 /* pointer to input data */ #define rH0 r5 /* 8 32 bit hash values in 8 registers */ #define rH1 r6 #define rH2 r7 #define rH3 r8 #define rH4 r9 #define rH5 r10 #define rH6 r11 #define rH7 r12 #define rW0 r14 /* 64 bit registers. 16 words in 8 registers */ #define rW1 r15 #define rW2 r16 #define rW3 r17 #define rW4 r18 #define rW5 r19 #define rW6 r20 #define rW7 r21 #define rT0 r22 /* 64 bit temporaries */ #define rT1 r23 #define rT2 r0 /* 32 bit temporaries */ #define rT3 r25 #define CMP_KN_LOOP #define CMP_KC_LOOP \ cmpwi rT1,0; #define INITIALIZE \ stwu r1,-128(r1); /* create stack frame */ \ evstdw r14,8(r1); /* We must save non volatile */ \ evstdw r15,16(r1); /* registers. Take the chance */ \ evstdw r16,24(r1); /* and save the SPE part too */ \ evstdw r17,32(r1); \ evstdw r18,40(r1); \ evstdw r19,48(r1); \ evstdw r20,56(r1); \ evstdw r21,64(r1); \ evstdw r22,72(r1); \ evstdw r23,80(r1); \ stw r24,88(r1); /* save normal registers */ \ stw r25,92(r1); #define FINALIZE \ evldw r14,8(r1); /* restore SPE registers */ \ evldw r15,16(r1); \ evldw r16,24(r1); \ evldw r17,32(r1); \ evldw r18,40(r1); \ evldw r19,48(r1); \ evldw r20,56(r1); \ evldw r21,64(r1); \ evldw r22,72(r1); \ evldw r23,80(r1); \ lwz r24,88(r1); /* restore normal registers */ \ lwz r25,92(r1); \ xor r0,r0,r0; \ stw r0,8(r1); /* Delete sensitive data */ \ stw r0,16(r1); /* that we might have pushed */ \ stw r0,24(r1); /* from other context that runs */ \ stw r0,32(r1); /* the same code. 
Assume that */ \ stw r0,40(r1); /* the lower part of the GPRs */ \ stw r0,48(r1); /* was already overwritten on */ \ stw r0,56(r1); /* the way down to here */ \ stw r0,64(r1); \ stw r0,72(r1); \ stw r0,80(r1); \ addi r1,r1,128; /* cleanup stack frame */ #ifdef __BIG_ENDIAN__ #define LOAD_DATA(reg, off) \ lwz reg,off(rWP); /* load data */ #define NEXT_BLOCK \ addi rWP,rWP,64; /* increment per block */ #else #define LOAD_DATA(reg, off) \ lwbrx reg,0,rWP; /* load data */ \ addi rWP,rWP,4; /* increment per word */ #define NEXT_BLOCK /* nothing to do */ #endif #define R_LOAD_W(a, b, c, d, e, f, g, h, w, off) \ LOAD_DATA(w, off) /* 1: W */ \ rotrwi rT0,e,6; /* 1: S1 = e rotr 6 */ \ rotrwi rT1,e,11; /* 1: S1' = e rotr 11 */ \ rotrwi rT2,e,25; /* 1: S1" = e rotr 25 */ \ xor rT0,rT0,rT1; /* 1: S1 = S1 xor S1' */ \ and rT3,e,f; /* 1: ch = e and f */ \ xor rT0,rT0,rT2; /* 1: S1 = S1 xor S1" */ \ andc rT1,g,e; /* 1: ch' = ~e and g */ \ lwz rT2,off(rKP); /* 1: K */ \ xor rT3,rT3,rT1; /* 1: ch = ch xor ch' */ \ add h,h,rT0; /* 1: temp1 = h + S1 */ \ add rT3,rT3,w; /* 1: temp1' = ch + w */ \ rotrwi rT0,a,2; /* 1: S0 = a rotr 2 */ \ add h,h,rT3; /* 1: temp1 = temp1 + temp1' */ \ rotrwi rT1,a,13; /* 1: S0' = a rotr 13 */ \ add h,h,rT2; /* 1: temp1 = temp1 + K */ \ rotrwi rT3,a,22; /* 1: S0" = a rotr 22 */ \ xor rT0,rT0,rT1; /* 1: S0 = S0 xor S0' */ \ add d,d,h; /* 1: d = d + temp1 */ \ xor rT3,rT0,rT3; /* 1: S0 = S0 xor S0" */ \ evmergelo w,w,w; /* shift W */ \ or rT2,a,b; /* 1: maj = a or b */ \ and rT1,a,b; /* 1: maj' = a and b */ \ and rT2,rT2,c; /* 1: maj = maj and c */ \ LOAD_DATA(w, off+4) /* 2: W */ \ or rT2,rT1,rT2; /* 1: maj = maj or maj' */ \ rotrwi rT0,d,6; /* 2: S1 = e rotr 6 */ \ add rT3,rT3,rT2; /* 1: temp2 = S0 + maj */ \ rotrwi rT1,d,11; /* 2: S1' = e rotr 11 */ \ add h,h,rT3; /* 1: h = temp1 + temp2 */ \ rotrwi rT2,d,25; /* 2: S1" = e rotr 25 */ \ xor rT0,rT0,rT1; /* 2: S1 = S1 xor S1' */ \ and rT3,d,e; /* 2: ch = e and f */ \ xor rT0,rT0,rT2; /* 2: S1 = S1 xor S1" */ \ andc rT1,f,d; /* 2: ch' = ~e and g */ \ lwz rT2,off+4(rKP); /* 2: K */ \ xor rT3,rT3,rT1; /* 2: ch = ch xor ch' */ \ add g,g,rT0; /* 2: temp1 = h + S1 */ \ add rT3,rT3,w; /* 2: temp1' = ch + w */ \ rotrwi rT0,h,2; /* 2: S0 = a rotr 2 */ \ add g,g,rT3; /* 2: temp1 = temp1 + temp1' */ \ rotrwi rT1,h,13; /* 2: S0' = a rotr 13 */ \ add g,g,rT2; /* 2: temp1 = temp1 + K */ \ rotrwi rT3,h,22; /* 2: S0" = a rotr 22 */ \ xor rT0,rT0,rT1; /* 2: S0 = S0 xor S0' */ \ or rT2,h,a; /* 2: maj = a or b */ \ xor rT3,rT0,rT3; /* 2: S0 = S0 xor S0" */ \ and rT1,h,a; /* 2: maj' = a and b */ \ and rT2,rT2,b; /* 2: maj = maj and c */ \ add c,c,g; /* 2: d = d + temp1 */ \ or rT2,rT1,rT2; /* 2: maj = maj or maj' */ \ add rT3,rT3,rT2; /* 2: temp2 = S0 + maj */ \ add g,g,rT3 /* 2: h = temp1 + temp2 */ #define R_CALC_W(a, b, c, d, e, f, g, h, w0, w1, w4, w5, w7, k, off) \ rotrwi rT2,e,6; /* 1: S1 = e rotr 6 */ \ evmergelohi rT0,w0,w1; /* w[-15] */ \ rotrwi rT3,e,11; /* 1: S1' = e rotr 11 */ \ evsrwiu rT1,rT0,3; /* s0 = w[-15] >> 3 */ \ xor rT2,rT2,rT3; /* 1: S1 = S1 xor S1' */ \ evrlwi rT0,rT0,25; /* s0' = w[-15] rotr 7 */ \ rotrwi rT3,e,25; /* 1: S1' = e rotr 25 */ \ evxor rT1,rT1,rT0; /* s0 = s0 xor s0' */ \ xor rT2,rT2,rT3; /* 1: S1 = S1 xor S1' */ \ evrlwi rT0,rT0,21; /* s0' = w[-15] rotr 18 */ \ add h,h,rT2; /* 1: temp1 = h + S1 */ \ evxor rT0,rT0,rT1; /* s0 = s0 xor s0' */ \ and rT2,e,f; /* 1: ch = e and f */ \ evaddw w0,w0,rT0; /* w = w[-16] + s0 */ \ andc rT3,g,e; /* 1: ch' = ~e and g */ \ evsrwiu rT0,w7,10; /* s1 = w[-2] >> 10 */ \ xor 
rT2,rT2,rT3; /* 1: ch = ch xor ch' */ \ evrlwi rT1,w7,15; /* s1' = w[-2] rotr 17 */ \ add h,h,rT2; /* 1: temp1 = temp1 + ch */ \ evxor rT0,rT0,rT1; /* s1 = s1 xor s1' */ \ rotrwi rT2,a,2; /* 1: S0 = a rotr 2 */ \ evrlwi rT1,w7,13; /* s1' = w[-2] rotr 19 */ \ rotrwi rT3,a,13; /* 1: S0' = a rotr 13 */ \ evxor rT0,rT0,rT1; /* s1 = s1 xor s1' */ \ xor rT2,rT2,rT3; /* 1: S0 = S0 xor S0' */ \ evldw rT1,off(rKP); /* k */ \ rotrwi rT3,a,22; /* 1: S0' = a rotr 22 */ \ evaddw w0,w0,rT0; /* w = w + s1 */ \ xor rT2,rT2,rT3; /* 1: S0 = S0 xor S0' */ \ evmergelohi rT0,w4,w5; /* w[-7] */ \ and rT3,a,b; /* 1: maj = a and b */ \ evaddw w0,w0,rT0; /* w = w + w[-7] */ \ CMP_K##k##_LOOP \ add rT2,rT2,rT3; /* 1: temp2 = S0 + maj */ \ evaddw rT1,rT1,w0; /* wk = w + k */ \ xor rT3,a,b; /* 1: maj = a xor b */ \ evmergehi rT0,rT1,rT1; /* wk1/wk2 */ \ and rT3,rT3,c; /* 1: maj = maj and c */ \ add h,h,rT0; /* 1: temp1 = temp1 + wk */ \ add rT2,rT2,rT3; /* 1: temp2 = temp2 + maj */ \ add g,g,rT1; /* 2: temp1 = temp1 + wk */ \ add d,d,h; /* 1: d = d + temp1 */ \ rotrwi rT0,d,6; /* 2: S1 = e rotr 6 */ \ add h,h,rT2; /* 1: h = temp1 + temp2 */ \ rotrwi rT1,d,11; /* 2: S1' = e rotr 11 */ \ rotrwi rT2,d,25; /* 2: S" = e rotr 25 */ \ xor rT0,rT0,rT1; /* 2: S1 = S1 xor S1' */ \ and rT3,d,e; /* 2: ch = e and f */ \ xor rT0,rT0,rT2; /* 2: S1 = S1 xor S1" */ \ andc rT1,f,d; /* 2: ch' = ~e and g */ \ add g,g,rT0; /* 2: temp1 = h + S1 */ \ xor rT3,rT3,rT1; /* 2: ch = ch xor ch' */ \ rotrwi rT0,h,2; /* 2: S0 = a rotr 2 */ \ add g,g,rT3; /* 2: temp1 = temp1 + ch */ \ rotrwi rT1,h,13; /* 2: S0' = a rotr 13 */ \ rotrwi rT3,h,22; /* 2: S0" = a rotr 22 */ \ xor rT0,rT0,rT1; /* 2: S0 = S0 xor S0' */ \ or rT2,h,a; /* 2: maj = a or b */ \ and rT1,h,a; /* 2: maj' = a and b */ \ and rT2,rT2,b; /* 2: maj = maj and c */ \ xor rT3,rT0,rT3; /* 2: S0 = S0 xor S0" */ \ or rT2,rT1,rT2; /* 2: maj = maj or maj' */ \ add c,c,g; /* 2: d = d + temp1 */ \ add rT3,rT3,rT2; /* 2: temp2 = S0 + maj */ \ add g,g,rT3 /* 2: h = temp1 + temp2 */ _GLOBAL(ppc_spe_sha256_transform) INITIALIZE mtctr r5 lwz rH0,0(rHP) lwz rH1,4(rHP) lwz rH2,8(rHP) lwz rH3,12(rHP) lwz rH4,16(rHP) lwz rH5,20(rHP) lwz rH6,24(rHP) lwz rH7,28(rHP) ppc_spe_sha256_main: lis rKP,PPC_SPE_SHA256_K@ha addi rKP,rKP,PPC_SPE_SHA256_K@l R_LOAD_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7, rW0, 0) R_LOAD_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5, rW1, 8) R_LOAD_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3, rW2, 16) R_LOAD_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1, rW3, 24) R_LOAD_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7, rW4, 32) R_LOAD_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5, rW5, 40) R_LOAD_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3, rW6, 48) R_LOAD_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1, rW7, 56) ppc_spe_sha256_16_rounds: addi rKP,rKP,64 R_CALC_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7, rW0, rW1, rW4, rW5, rW7, N, 0) R_CALC_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5, rW1, rW2, rW5, rW6, rW0, N, 8) R_CALC_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3, rW2, rW3, rW6, rW7, rW1, N, 16) R_CALC_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1, rW3, rW4, rW7, rW0, rW2, N, 24) R_CALC_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7, rW4, rW5, rW0, rW1, rW3, N, 32) R_CALC_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5, rW5, rW6, rW1, rW2, rW4, N, 40) R_CALC_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3, rW6, rW7, rW2, rW3, rW5, N, 48) R_CALC_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1, rW7, rW0, rW3, rW4, rW6, C, 56) bt gt,ppc_spe_sha256_16_rounds lwz rW0,0(rHP) NEXT_BLOCK lwz rW1,4(rHP) lwz rW2,8(rHP) lwz rW3,12(rHP) lwz rW4,16(rHP) lwz rW5,20(rHP) 
lwz rW6,24(rHP) lwz rW7,28(rHP) add rH0,rH0,rW0 stw rH0,0(rHP) add rH1,rH1,rW1 stw rH1,4(rHP) add rH2,rH2,rW2 stw rH2,8(rHP) add rH3,rH3,rW3 stw rH3,12(rHP) add rH4,rH4,rW4 stw rH4,16(rHP) add rH5,rH5,rW5 stw rH5,20(rHP) add rH6,rH6,rW6 stw rH6,24(rHP) add rH7,rH7,rW7 stw rH7,28(rHP) bdnz ppc_spe_sha256_main FINALIZE blr .data .align 5 PPC_SPE_SHA256_K: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
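In R_CALC_W the evrlwi counts are left-rotates, so 25, 21, 15 and 13 correspond to right-rotates of 7, 18, 17 and 19. A scalar C sketch of the resulting SHA-256 schedule step, with illustrative names and no SPE pairing:

#include <stdint.h>

static uint32_t rotr(uint32_t x, unsigned int n)
{
	return x >> n | x << (32 - n);
}

/* s0 on w[-15]: rotr 7 ^ rotr 18 ^ (>> 3), per the asm comments. */
static uint32_t sigma0(uint32_t x)
{
	return rotr(x, 7) ^ rotr(x, 18) ^ (x >> 3);
}

/* s1 on w[-2]: rotr 17 ^ rotr 19 ^ (>> 10). */
static uint32_t sigma1(uint32_t x)
{
	return rotr(x, 17) ^ rotr(x, 19) ^ (x >> 10);
}

/* One schedule word (t >= 16), matching the evaddw chain:
 * w = w[-16] + s0(w[-15]) + w[-7] + s1(w[-2]). */
static uint32_t w_next(const uint32_t w[16], unsigned int t)
{
	return w[(t - 16) % 16] + sigma0(w[(t - 15) % 16]) +
	       w[(t - 7) % 16] + sigma1(w[(t - 2) % 16]);
}

int main(void) { uint32_t w[16] = { 1 }; return (int)w_next(w, 16) & 0; }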
aixcc-public/challenge-001-exemplar-source
14,207
arch/powerpc/crypto/crc32-vpmsum_core.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Core of the accelerated CRC algorithm. * In your file, define the constants and CRC_FUNCTION_NAME * Then include this file. * * Calculate the checksum of data that is 16 byte aligned and a multiple of * 16 bytes. * * The first step is to reduce it to 1024 bits. We do this in 8 parallel * chunks in order to mask the latency of the vpmsum instructions. If we * have more than 32 kB of data to checksum we repeat this step multiple * times, passing in the previous 1024 bits. * * The next step is to reduce the 1024 bits to 64 bits. This step adds * 32 bits of 0s to the end - this matches what a CRC does. We just * calculate constants that land the data in this 32 bits. * * We then use fixed point Barrett reduction to compute a mod n over GF(2) * for n = CRC using POWER8 instructions. We use x = 32. * * https://en.wikipedia.org/wiki/Barrett_reduction * * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM */ #include <asm/ppc_asm.h> #include <asm/ppc-opcode.h> #define MAX_SIZE 32768 .text #if defined(__BIG_ENDIAN__) && defined(REFLECT) #define BYTESWAP_DATA #elif defined(__LITTLE_ENDIAN__) && !defined(REFLECT) #define BYTESWAP_DATA #else #undef BYTESWAP_DATA #endif #define off16 r25 #define off32 r26 #define off48 r27 #define off64 r28 #define off80 r29 #define off96 r30 #define off112 r31 #define const1 v24 #define const2 v25 #define byteswap v26 #define mask_32bit v27 #define mask_64bit v28 #define zeroes v29 #ifdef BYTESWAP_DATA #define VPERM(A, B, C, D) vperm A, B, C, D #else #define VPERM(A, B, C, D) #endif /* unsigned int CRC_FUNCTION_NAME(unsigned int crc, void *p, unsigned long len) */ FUNC_START(CRC_FUNCTION_NAME) std r31,-8(r1) std r30,-16(r1) std r29,-24(r1) std r28,-32(r1) std r27,-40(r1) std r26,-48(r1) std r25,-56(r1) li off16,16 li off32,32 li off48,48 li off64,64 li off80,80 li off96,96 li off112,112 li r0,0 /* Enough room for saving 10 non volatile VMX registers */ subi r6,r1,56+10*16 subi r7,r1,56+2*16 stvx v20,0,r6 stvx v21,off16,r6 stvx v22,off32,r6 stvx v23,off48,r6 stvx v24,off64,r6 stvx v25,off80,r6 stvx v26,off96,r6 stvx v27,off112,r6 stvx v28,0,r7 stvx v29,off16,r7 mr r10,r3 vxor zeroes,zeroes,zeroes vspltisw v0,-1 vsldoi mask_32bit,zeroes,v0,4 vsldoi mask_64bit,zeroes,v0,8 /* Get the initial value into v8 */ vxor v8,v8,v8 MTVRD(v8, R3) #ifdef REFLECT vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */ #else vsldoi v8,v8,zeroes,4 /* shift into top 32 bits */ #endif #ifdef BYTESWAP_DATA addis r3,r2,.byteswap_constant@toc@ha addi r3,r3,.byteswap_constant@toc@l lvx byteswap,0,r3 addi r3,r3,16 #endif cmpdi r5,256 blt .Lshort rldicr r6,r5,0,56 /* Checksum in blocks of MAX_SIZE */ 1: lis r7,MAX_SIZE@h ori r7,r7,MAX_SIZE@l mr r9,r7 cmpd r6,r7 bgt 2f mr r7,r6 2: subf r6,r7,r6 /* our main loop does 128 bytes at a time */ srdi r7,r7,7 /* * Work out the offset into the constants table to start at. Each * constant is 16 bytes, and it is used against 128 bytes of input * data - 128 / 16 = 8 */ sldi r8,r7,4 srdi r9,r9,3 subf r8,r8,r9 /* We reduce our final 128 bytes in a separate step */ addi r7,r7,-1 mtctr r7 addis r3,r2,.constants@toc@ha addi r3,r3,.constants@toc@l /* Find the start of our constants */ add r3,r3,r8 /* zero v0-v7 which will contain our checksums */ vxor v0,v0,v0 vxor v1,v1,v1 vxor v2,v2,v2 vxor v3,v3,v3 vxor v4,v4,v4 vxor v5,v5,v5 vxor v6,v6,v6 vxor v7,v7,v7 lvx const1,0,r3 /* * If we are looping back to consume more data we use the values * already in v16-v23. 
*/ cmpdi r0,1 beq 2f /* First warm up pass */ lvx v16,0,r4 lvx v17,off16,r4 VPERM(v16,v16,v16,byteswap) VPERM(v17,v17,v17,byteswap) lvx v18,off32,r4 lvx v19,off48,r4 VPERM(v18,v18,v18,byteswap) VPERM(v19,v19,v19,byteswap) lvx v20,off64,r4 lvx v21,off80,r4 VPERM(v20,v20,v20,byteswap) VPERM(v21,v21,v21,byteswap) lvx v22,off96,r4 lvx v23,off112,r4 VPERM(v22,v22,v22,byteswap) VPERM(v23,v23,v23,byteswap) addi r4,r4,8*16 /* xor in initial value */ vxor v16,v16,v8 2: bdz .Lfirst_warm_up_done addi r3,r3,16 lvx const2,0,r3 /* Second warm up pass */ VPMSUMD(v8,v16,const1) lvx v16,0,r4 VPERM(v16,v16,v16,byteswap) ori r2,r2,0 VPMSUMD(v9,v17,const1) lvx v17,off16,r4 VPERM(v17,v17,v17,byteswap) ori r2,r2,0 VPMSUMD(v10,v18,const1) lvx v18,off32,r4 VPERM(v18,v18,v18,byteswap) ori r2,r2,0 VPMSUMD(v11,v19,const1) lvx v19,off48,r4 VPERM(v19,v19,v19,byteswap) ori r2,r2,0 VPMSUMD(v12,v20,const1) lvx v20,off64,r4 VPERM(v20,v20,v20,byteswap) ori r2,r2,0 VPMSUMD(v13,v21,const1) lvx v21,off80,r4 VPERM(v21,v21,v21,byteswap) ori r2,r2,0 VPMSUMD(v14,v22,const1) lvx v22,off96,r4 VPERM(v22,v22,v22,byteswap) ori r2,r2,0 VPMSUMD(v15,v23,const1) lvx v23,off112,r4 VPERM(v23,v23,v23,byteswap) addi r4,r4,8*16 bdz .Lfirst_cool_down /* * main loop. We modulo schedule it such that it takes three iterations * to complete - first iteration load, second iteration vpmsum, third * iteration xor. */ .balign 16 4: lvx const1,0,r3 addi r3,r3,16 ori r2,r2,0 vxor v0,v0,v8 VPMSUMD(v8,v16,const2) lvx v16,0,r4 VPERM(v16,v16,v16,byteswap) ori r2,r2,0 vxor v1,v1,v9 VPMSUMD(v9,v17,const2) lvx v17,off16,r4 VPERM(v17,v17,v17,byteswap) ori r2,r2,0 vxor v2,v2,v10 VPMSUMD(v10,v18,const2) lvx v18,off32,r4 VPERM(v18,v18,v18,byteswap) ori r2,r2,0 vxor v3,v3,v11 VPMSUMD(v11,v19,const2) lvx v19,off48,r4 VPERM(v19,v19,v19,byteswap) lvx const2,0,r3 ori r2,r2,0 vxor v4,v4,v12 VPMSUMD(v12,v20,const1) lvx v20,off64,r4 VPERM(v20,v20,v20,byteswap) ori r2,r2,0 vxor v5,v5,v13 VPMSUMD(v13,v21,const1) lvx v21,off80,r4 VPERM(v21,v21,v21,byteswap) ori r2,r2,0 vxor v6,v6,v14 VPMSUMD(v14,v22,const1) lvx v22,off96,r4 VPERM(v22,v22,v22,byteswap) ori r2,r2,0 vxor v7,v7,v15 VPMSUMD(v15,v23,const1) lvx v23,off112,r4 VPERM(v23,v23,v23,byteswap) addi r4,r4,8*16 bdnz 4b .Lfirst_cool_down: /* First cool down pass */ lvx const1,0,r3 addi r3,r3,16 vxor v0,v0,v8 VPMSUMD(v8,v16,const1) ori r2,r2,0 vxor v1,v1,v9 VPMSUMD(v9,v17,const1) ori r2,r2,0 vxor v2,v2,v10 VPMSUMD(v10,v18,const1) ori r2,r2,0 vxor v3,v3,v11 VPMSUMD(v11,v19,const1) ori r2,r2,0 vxor v4,v4,v12 VPMSUMD(v12,v20,const1) ori r2,r2,0 vxor v5,v5,v13 VPMSUMD(v13,v21,const1) ori r2,r2,0 vxor v6,v6,v14 VPMSUMD(v14,v22,const1) ori r2,r2,0 vxor v7,v7,v15 VPMSUMD(v15,v23,const1) ori r2,r2,0 .Lsecond_cool_down: /* Second cool down pass */ vxor v0,v0,v8 vxor v1,v1,v9 vxor v2,v2,v10 vxor v3,v3,v11 vxor v4,v4,v12 vxor v5,v5,v13 vxor v6,v6,v14 vxor v7,v7,v15 #ifdef REFLECT /* * vpmsumd produces a 96 bit result in the least significant bits * of the register. Since we are bit reflected we have to shift it * left 32 bits so it occupies the least significant bits in the * bit reflected domain. 
*/ vsldoi v0,v0,zeroes,4 vsldoi v1,v1,zeroes,4 vsldoi v2,v2,zeroes,4 vsldoi v3,v3,zeroes,4 vsldoi v4,v4,zeroes,4 vsldoi v5,v5,zeroes,4 vsldoi v6,v6,zeroes,4 vsldoi v7,v7,zeroes,4 #endif /* xor with last 1024 bits */ lvx v8,0,r4 lvx v9,off16,r4 VPERM(v8,v8,v8,byteswap) VPERM(v9,v9,v9,byteswap) lvx v10,off32,r4 lvx v11,off48,r4 VPERM(v10,v10,v10,byteswap) VPERM(v11,v11,v11,byteswap) lvx v12,off64,r4 lvx v13,off80,r4 VPERM(v12,v12,v12,byteswap) VPERM(v13,v13,v13,byteswap) lvx v14,off96,r4 lvx v15,off112,r4 VPERM(v14,v14,v14,byteswap) VPERM(v15,v15,v15,byteswap) addi r4,r4,8*16 vxor v16,v0,v8 vxor v17,v1,v9 vxor v18,v2,v10 vxor v19,v3,v11 vxor v20,v4,v12 vxor v21,v5,v13 vxor v22,v6,v14 vxor v23,v7,v15 li r0,1 cmpdi r6,0 addi r6,r6,128 bne 1b /* Work out how many bytes we have left */ andi. r5,r5,127 /* Calculate where in the constant table we need to start */ subfic r6,r5,128 add r3,r3,r6 /* How many 16 byte chunks are in the tail */ srdi r7,r5,4 mtctr r7 /* * Reduce the previously calculated 1024 bits to 64 bits, shifting * 32 bits to include the trailing 32 bits of zeros */ lvx v0,0,r3 lvx v1,off16,r3 lvx v2,off32,r3 lvx v3,off48,r3 lvx v4,off64,r3 lvx v5,off80,r3 lvx v6,off96,r3 lvx v7,off112,r3 addi r3,r3,8*16 VPMSUMW(v0,v16,v0) VPMSUMW(v1,v17,v1) VPMSUMW(v2,v18,v2) VPMSUMW(v3,v19,v3) VPMSUMW(v4,v20,v4) VPMSUMW(v5,v21,v5) VPMSUMW(v6,v22,v6) VPMSUMW(v7,v23,v7) /* Now reduce the tail (0 - 112 bytes) */ cmpdi r7,0 beq 1f lvx v16,0,r4 lvx v17,0,r3 VPERM(v16,v16,v16,byteswap) VPMSUMW(v16,v16,v17) vxor v0,v0,v16 bdz 1f lvx v16,off16,r4 lvx v17,off16,r3 VPERM(v16,v16,v16,byteswap) VPMSUMW(v16,v16,v17) vxor v0,v0,v16 bdz 1f lvx v16,off32,r4 lvx v17,off32,r3 VPERM(v16,v16,v16,byteswap) VPMSUMW(v16,v16,v17) vxor v0,v0,v16 bdz 1f lvx v16,off48,r4 lvx v17,off48,r3 VPERM(v16,v16,v16,byteswap) VPMSUMW(v16,v16,v17) vxor v0,v0,v16 bdz 1f lvx v16,off64,r4 lvx v17,off64,r3 VPERM(v16,v16,v16,byteswap) VPMSUMW(v16,v16,v17) vxor v0,v0,v16 bdz 1f lvx v16,off80,r4 lvx v17,off80,r3 VPERM(v16,v16,v16,byteswap) VPMSUMW(v16,v16,v17) vxor v0,v0,v16 bdz 1f lvx v16,off96,r4 lvx v17,off96,r3 VPERM(v16,v16,v16,byteswap) VPMSUMW(v16,v16,v17) vxor v0,v0,v16 /* Now xor all the parallel chunks together */ 1: vxor v0,v0,v1 vxor v2,v2,v3 vxor v4,v4,v5 vxor v6,v6,v7 vxor v0,v0,v2 vxor v4,v4,v6 vxor v0,v0,v4 .Lbarrett_reduction: /* Barrett constants */ addis r3,r2,.barrett_constants@toc@ha addi r3,r3,.barrett_constants@toc@l lvx const1,0,r3 lvx const2,off16,r3 vsldoi v1,v0,v0,8 vxor v0,v0,v1 /* xor two 64 bit results together */ #ifdef REFLECT /* shift left one bit */ vspltisb v1,1 vsl v0,v0,v1 #endif vand v0,v0,mask_64bit #ifndef REFLECT /* * Now for the Barrett reduction algorithm. The idea is to calculate q, * the multiple of our polynomial that we need to subtract. By * doing the computation 2x bits higher (ie 64 bits) and shifting the * result back down 2x bits, we round down to the nearest multiple. */ VPMSUMD(v1,v0,const1) /* ma */ vsldoi v1,zeroes,v1,8 /* q = floor(ma/(2^64)) */ VPMSUMD(v1,v1,const2) /* qn */ vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */ /* * Get the result into r3. We need to shift it left 8 bytes: * V0 [ 0 1 2 X ] * V0 [ 0 X 2 3 ] */ vsldoi v0,v0,zeroes,8 /* shift result into top 64 bits */ #else /* * The reflected version of Barrett reduction. Instead of bit * reflecting our data (which is expensive to do), we bit reflect our * constants and our algorithm, which means the intermediate data in * our vector registers goes from 0-63 instead of 63-0. 
We can reflect * the algorithm because we don't carry in mod 2 arithmetic. */ vand v1,v0,mask_32bit /* bottom 32 bits of a */ VPMSUMD(v1,v1,const1) /* ma */ vand v1,v1,mask_32bit /* bottom 32bits of ma */ VPMSUMD(v1,v1,const2) /* qn */ vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */ /* * Since we are bit reflected, the result (ie the low 32 bits) is in * the high 32 bits. We just need to shift it left 4 bytes * V0 [ 0 1 X 3 ] * V0 [ 0 X 2 3 ] */ vsldoi v0,v0,zeroes,4 /* shift result into top 64 bits of */ #endif /* Get it into r3 */ MFVRD(R3, v0) .Lout: subi r6,r1,56+10*16 subi r7,r1,56+2*16 lvx v20,0,r6 lvx v21,off16,r6 lvx v22,off32,r6 lvx v23,off48,r6 lvx v24,off64,r6 lvx v25,off80,r6 lvx v26,off96,r6 lvx v27,off112,r6 lvx v28,0,r7 lvx v29,off16,r7 ld r31,-8(r1) ld r30,-16(r1) ld r29,-24(r1) ld r28,-32(r1) ld r27,-40(r1) ld r26,-48(r1) ld r25,-56(r1) blr .Lfirst_warm_up_done: lvx const1,0,r3 addi r3,r3,16 VPMSUMD(v8,v16,const1) VPMSUMD(v9,v17,const1) VPMSUMD(v10,v18,const1) VPMSUMD(v11,v19,const1) VPMSUMD(v12,v20,const1) VPMSUMD(v13,v21,const1) VPMSUMD(v14,v22,const1) VPMSUMD(v15,v23,const1) b .Lsecond_cool_down .Lshort: cmpdi r5,0 beq .Lzero addis r3,r2,.short_constants@toc@ha addi r3,r3,.short_constants@toc@l /* Calculate where in the constant table we need to start */ subfic r6,r5,256 add r3,r3,r6 /* How many 16 byte chunks? */ srdi r7,r5,4 mtctr r7 vxor v19,v19,v19 vxor v20,v20,v20 lvx v0,0,r4 lvx v16,0,r3 VPERM(v0,v0,v16,byteswap) vxor v0,v0,v8 /* xor in initial value */ VPMSUMW(v0,v0,v16) bdz .Lv0 lvx v1,off16,r4 lvx v17,off16,r3 VPERM(v1,v1,v17,byteswap) VPMSUMW(v1,v1,v17) bdz .Lv1 lvx v2,off32,r4 lvx v16,off32,r3 VPERM(v2,v2,v16,byteswap) VPMSUMW(v2,v2,v16) bdz .Lv2 lvx v3,off48,r4 lvx v17,off48,r3 VPERM(v3,v3,v17,byteswap) VPMSUMW(v3,v3,v17) bdz .Lv3 lvx v4,off64,r4 lvx v16,off64,r3 VPERM(v4,v4,v16,byteswap) VPMSUMW(v4,v4,v16) bdz .Lv4 lvx v5,off80,r4 lvx v17,off80,r3 VPERM(v5,v5,v17,byteswap) VPMSUMW(v5,v5,v17) bdz .Lv5 lvx v6,off96,r4 lvx v16,off96,r3 VPERM(v6,v6,v16,byteswap) VPMSUMW(v6,v6,v16) bdz .Lv6 lvx v7,off112,r4 lvx v17,off112,r3 VPERM(v7,v7,v17,byteswap) VPMSUMW(v7,v7,v17) bdz .Lv7 addi r3,r3,128 addi r4,r4,128 lvx v8,0,r4 lvx v16,0,r3 VPERM(v8,v8,v16,byteswap) VPMSUMW(v8,v8,v16) bdz .Lv8 lvx v9,off16,r4 lvx v17,off16,r3 VPERM(v9,v9,v17,byteswap) VPMSUMW(v9,v9,v17) bdz .Lv9 lvx v10,off32,r4 lvx v16,off32,r3 VPERM(v10,v10,v16,byteswap) VPMSUMW(v10,v10,v16) bdz .Lv10 lvx v11,off48,r4 lvx v17,off48,r3 VPERM(v11,v11,v17,byteswap) VPMSUMW(v11,v11,v17) bdz .Lv11 lvx v12,off64,r4 lvx v16,off64,r3 VPERM(v12,v12,v16,byteswap) VPMSUMW(v12,v12,v16) bdz .Lv12 lvx v13,off80,r4 lvx v17,off80,r3 VPERM(v13,v13,v17,byteswap) VPMSUMW(v13,v13,v17) bdz .Lv13 lvx v14,off96,r4 lvx v16,off96,r3 VPERM(v14,v14,v16,byteswap) VPMSUMW(v14,v14,v16) bdz .Lv14 lvx v15,off112,r4 lvx v17,off112,r3 VPERM(v15,v15,v17,byteswap) VPMSUMW(v15,v15,v17) .Lv15: vxor v19,v19,v15 .Lv14: vxor v20,v20,v14 .Lv13: vxor v19,v19,v13 .Lv12: vxor v20,v20,v12 .Lv11: vxor v19,v19,v11 .Lv10: vxor v20,v20,v10 .Lv9: vxor v19,v19,v9 .Lv8: vxor v20,v20,v8 .Lv7: vxor v19,v19,v7 .Lv6: vxor v20,v20,v6 .Lv5: vxor v19,v19,v5 .Lv4: vxor v20,v20,v4 .Lv3: vxor v19,v19,v3 .Lv2: vxor v20,v20,v2 .Lv1: vxor v19,v19,v1 .Lv0: vxor v20,v20,v0 vxor v0,v19,v20 b .Lbarrett_reduction .Lzero: mr r3,r10 b .Lout FUNC_END(CRC_FUNCTION_NAME)
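For reference, the arithmetic this whole file accelerates is polynomial division over GF(2), where subtraction is xor. A bit-serial sketch, assuming the classic reflected CRC-32 (Ethernet) polynomial as one concrete example — which polynomial a given CRC_FUNCTION_NAME instance computes depends on the constants defined by the including file:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		/* One GF(2) polynomial-division step per bit: if the low
		 * bit is set, "subtract" (xor) the reflected polynomial. */
		for (int k = 0; k < 8; k++)
			crc = crc & 1 ? (crc >> 1) ^ 0xedb88320u : crc >> 1;
	}
	return ~crc;
}

int main(void)
{
	/* CRC-32 of "123456789" is the standard check value 0xcbf43926. */
	printf("%08x\n", crc32_bitwise(0, (const uint8_t *)"123456789", 9));
	return 0;
}

The vector code replaces this inner loop with wide carryless multiplies and finishes with the Barrett step shown above, but the result is the same remainder.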
aixcc-public/challenge-001-exemplar-source
14,446
arch/powerpc/crypto/aes-tab-4k.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * 4K AES tables for PPC AES implementation * * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> */ /* * These big endian AES encryption/decryption tables have been taken from * crypto/aes_generic.c and are designed to be simply accessed by a combination * of rlwimi/lwz instructions with a minimum of table registers (usually only * one required). Thus they are aligned to 4K. The locality of rotated values * is derived from the reduced offsets that are available in the SPE load * instructions. E.g. evldw, evlwwsplat, ... * * For the safety-conscious it has to be noted that they might be vulnerable * to cache timing attacks because of their size. Nevertheless in contrast to * the generic tables they have been reduced from 16KB to 8KB + 256 bytes. * This is a quite good tradeoff for low power devices (e.g. routers) without * dedicated encryption hardware where we usually have no multiuser * environment. * */ #define R(a, b, c, d) \ 0x##a##b##c##d, 0x##d##a##b##c, 0x##c##d##a##b, 0x##b##c##d##a .data .align 12 .globl PPC_AES_4K_ENCTAB PPC_AES_4K_ENCTAB: /* encryption table, same as crypto_ft_tab in crypto/aes-generic.c */ .long R(c6, 63, 63, a5), R(f8, 7c, 7c, 84) .long R(ee, 77, 77, 99), R(f6, 7b, 7b, 8d) .long R(ff, f2, f2, 0d), R(d6, 6b, 6b, bd) .long R(de, 6f, 6f, b1), R(91, c5, c5, 54) .long R(60, 30, 30, 50), R(02, 01, 01, 03) .long R(ce, 67, 67, a9), R(56, 2b, 2b, 7d) .long R(e7, fe, fe, 19), R(b5, d7, d7, 62) .long R(4d, ab, ab, e6), R(ec, 76, 76, 9a) .long R(8f, ca, ca, 45), R(1f, 82, 82, 9d) .long R(89, c9, c9, 40), R(fa, 7d, 7d, 87) .long R(ef, fa, fa, 15), R(b2, 59, 59, eb) .long R(8e, 47, 47, c9), R(fb, f0, f0, 0b) .long R(41, ad, ad, ec), R(b3, d4, d4, 67) .long R(5f, a2, a2, fd), R(45, af, af, ea) .long R(23, 9c, 9c, bf), R(53, a4, a4, f7) .long R(e4, 72, 72, 96), R(9b, c0, c0, 5b) .long R(75, b7, b7, c2), R(e1, fd, fd, 1c) .long R(3d, 93, 93, ae), R(4c, 26, 26, 6a) .long R(6c, 36, 36, 5a), R(7e, 3f, 3f, 41) .long R(f5, f7, f7, 02), R(83, cc, cc, 4f) .long R(68, 34, 34, 5c), R(51, a5, a5, f4) .long R(d1, e5, e5, 34), R(f9, f1, f1, 08) .long R(e2, 71, 71, 93), R(ab, d8, d8, 73) .long R(62, 31, 31, 53), R(2a, 15, 15, 3f) .long R(08, 04, 04, 0c), R(95, c7, c7, 52) .long R(46, 23, 23, 65), R(9d, c3, c3, 5e) .long R(30, 18, 18, 28), R(37, 96, 96, a1) .long R(0a, 05, 05, 0f), R(2f, 9a, 9a, b5) .long R(0e, 07, 07, 09), R(24, 12, 12, 36) .long R(1b, 80, 80, 9b), R(df, e2, e2, 3d) .long R(cd, eb, eb, 26), R(4e, 27, 27, 69) .long R(7f, b2, b2, cd), R(ea, 75, 75, 9f) .long R(12, 09, 09, 1b), R(1d, 83, 83, 9e) .long R(58, 2c, 2c, 74), R(34, 1a, 1a, 2e) .long R(36, 1b, 1b, 2d), R(dc, 6e, 6e, b2) .long R(b4, 5a, 5a, ee), R(5b, a0, a0, fb) .long R(a4, 52, 52, f6), R(76, 3b, 3b, 4d) .long R(b7, d6, d6, 61), R(7d, b3, b3, ce) .long R(52, 29, 29, 7b), R(dd, e3, e3, 3e) .long R(5e, 2f, 2f, 71), R(13, 84, 84, 97) .long R(a6, 53, 53, f5), R(b9, d1, d1, 68) .long R(00, 00, 00, 00), R(c1, ed, ed, 2c) .long R(40, 20, 20, 60), R(e3, fc, fc, 1f) .long R(79, b1, b1, c8), R(b6, 5b, 5b, ed) .long R(d4, 6a, 6a, be), R(8d, cb, cb, 46) .long R(67, be, be, d9), R(72, 39, 39, 4b) .long R(94, 4a, 4a, de), R(98, 4c, 4c, d4) .long R(b0, 58, 58, e8), R(85, cf, cf, 4a) .long R(bb, d0, d0, 6b), R(c5, ef, ef, 2a) .long R(4f, aa, aa, e5), R(ed, fb, fb, 16) .long R(86, 43, 43, c5), R(9a, 4d, 4d, d7) .long R(66, 33, 33, 55), R(11, 85, 85, 94) .long R(8a, 45, 45, cf), R(e9, f9, f9, 10) .long R(04, 02, 02, 06), R(fe, 7f, 7f, 81) .long R(a0, 50, 50, f0), R(78, 
3c, 3c, 44) .long R(25, 9f, 9f, ba), R(4b, a8, a8, e3) .long R(a2, 51, 51, f3), R(5d, a3, a3, fe) .long R(80, 40, 40, c0), R(05, 8f, 8f, 8a) .long R(3f, 92, 92, ad), R(21, 9d, 9d, bc) .long R(70, 38, 38, 48), R(f1, f5, f5, 04) .long R(63, bc, bc, df), R(77, b6, b6, c1) .long R(af, da, da, 75), R(42, 21, 21, 63) .long R(20, 10, 10, 30), R(e5, ff, ff, 1a) .long R(fd, f3, f3, 0e), R(bf, d2, d2, 6d) .long R(81, cd, cd, 4c), R(18, 0c, 0c, 14) .long R(26, 13, 13, 35), R(c3, ec, ec, 2f) .long R(be, 5f, 5f, e1), R(35, 97, 97, a2) .long R(88, 44, 44, cc), R(2e, 17, 17, 39) .long R(93, c4, c4, 57), R(55, a7, a7, f2) .long R(fc, 7e, 7e, 82), R(7a, 3d, 3d, 47) .long R(c8, 64, 64, ac), R(ba, 5d, 5d, e7) .long R(32, 19, 19, 2b), R(e6, 73, 73, 95) .long R(c0, 60, 60, a0), R(19, 81, 81, 98) .long R(9e, 4f, 4f, d1), R(a3, dc, dc, 7f) .long R(44, 22, 22, 66), R(54, 2a, 2a, 7e) .long R(3b, 90, 90, ab), R(0b, 88, 88, 83) .long R(8c, 46, 46, ca), R(c7, ee, ee, 29) .long R(6b, b8, b8, d3), R(28, 14, 14, 3c) .long R(a7, de, de, 79), R(bc, 5e, 5e, e2) .long R(16, 0b, 0b, 1d), R(ad, db, db, 76) .long R(db, e0, e0, 3b), R(64, 32, 32, 56) .long R(74, 3a, 3a, 4e), R(14, 0a, 0a, 1e) .long R(92, 49, 49, db), R(0c, 06, 06, 0a) .long R(48, 24, 24, 6c), R(b8, 5c, 5c, e4) .long R(9f, c2, c2, 5d), R(bd, d3, d3, 6e) .long R(43, ac, ac, ef), R(c4, 62, 62, a6) .long R(39, 91, 91, a8), R(31, 95, 95, a4) .long R(d3, e4, e4, 37), R(f2, 79, 79, 8b) .long R(d5, e7, e7, 32), R(8b, c8, c8, 43) .long R(6e, 37, 37, 59), R(da, 6d, 6d, b7) .long R(01, 8d, 8d, 8c), R(b1, d5, d5, 64) .long R(9c, 4e, 4e, d2), R(49, a9, a9, e0) .long R(d8, 6c, 6c, b4), R(ac, 56, 56, fa) .long R(f3, f4, f4, 07), R(cf, ea, ea, 25) .long R(ca, 65, 65, af), R(f4, 7a, 7a, 8e) .long R(47, ae, ae, e9), R(10, 08, 08, 18) .long R(6f, ba, ba, d5), R(f0, 78, 78, 88) .long R(4a, 25, 25, 6f), R(5c, 2e, 2e, 72) .long R(38, 1c, 1c, 24), R(57, a6, a6, f1) .long R(73, b4, b4, c7), R(97, c6, c6, 51) .long R(cb, e8, e8, 23), R(a1, dd, dd, 7c) .long R(e8, 74, 74, 9c), R(3e, 1f, 1f, 21) .long R(96, 4b, 4b, dd), R(61, bd, bd, dc) .long R(0d, 8b, 8b, 86), R(0f, 8a, 8a, 85) .long R(e0, 70, 70, 90), R(7c, 3e, 3e, 42) .long R(71, b5, b5, c4), R(cc, 66, 66, aa) .long R(90, 48, 48, d8), R(06, 03, 03, 05) .long R(f7, f6, f6, 01), R(1c, 0e, 0e, 12) .long R(c2, 61, 61, a3), R(6a, 35, 35, 5f) .long R(ae, 57, 57, f9), R(69, b9, b9, d0) .long R(17, 86, 86, 91), R(99, c1, c1, 58) .long R(3a, 1d, 1d, 27), R(27, 9e, 9e, b9) .long R(d9, e1, e1, 38), R(eb, f8, f8, 13) .long R(2b, 98, 98, b3), R(22, 11, 11, 33) .long R(d2, 69, 69, bb), R(a9, d9, d9, 70) .long R(07, 8e, 8e, 89), R(33, 94, 94, a7) .long R(2d, 9b, 9b, b6), R(3c, 1e, 1e, 22) .long R(15, 87, 87, 92), R(c9, e9, e9, 20) .long R(87, ce, ce, 49), R(aa, 55, 55, ff) .long R(50, 28, 28, 78), R(a5, df, df, 7a) .long R(03, 8c, 8c, 8f), R(59, a1, a1, f8) .long R(09, 89, 89, 80), R(1a, 0d, 0d, 17) .long R(65, bf, bf, da), R(d7, e6, e6, 31) .long R(84, 42, 42, c6), R(d0, 68, 68, b8) .long R(82, 41, 41, c3), R(29, 99, 99, b0) .long R(5a, 2d, 2d, 77), R(1e, 0f, 0f, 11) .long R(7b, b0, b0, cb), R(a8, 54, 54, fc) .long R(6d, bb, bb, d6), R(2c, 16, 16, 3a) .globl PPC_AES_4K_DECTAB PPC_AES_4K_DECTAB: /* decryption table, same as crypto_it_tab in crypto/aes-generic.c */ .long R(51, f4, a7, 50), R(7e, 41, 65, 53) .long R(1a, 17, a4, c3), R(3a, 27, 5e, 96) .long R(3b, ab, 6b, cb), R(1f, 9d, 45, f1) .long R(ac, fa, 58, ab), R(4b, e3, 03, 93) .long R(20, 30, fa, 55), R(ad, 76, 6d, f6) .long R(88, cc, 76, 91), R(f5, 02, 4c, 25) .long R(4f, e5, d7, fc), R(c5, 
2a, cb, d7) .long R(26, 35, 44, 80), R(b5, 62, a3, 8f) .long R(de, b1, 5a, 49), R(25, ba, 1b, 67) .long R(45, ea, 0e, 98), R(5d, fe, c0, e1) .long R(c3, 2f, 75, 02), R(81, 4c, f0, 12) .long R(8d, 46, 97, a3), R(6b, d3, f9, c6) .long R(03, 8f, 5f, e7), R(15, 92, 9c, 95) .long R(bf, 6d, 7a, eb), R(95, 52, 59, da) .long R(d4, be, 83, 2d), R(58, 74, 21, d3) .long R(49, e0, 69, 29), R(8e, c9, c8, 44) .long R(75, c2, 89, 6a), R(f4, 8e, 79, 78) .long R(99, 58, 3e, 6b), R(27, b9, 71, dd) .long R(be, e1, 4f, b6), R(f0, 88, ad, 17) .long R(c9, 20, ac, 66), R(7d, ce, 3a, b4) .long R(63, df, 4a, 18), R(e5, 1a, 31, 82) .long R(97, 51, 33, 60), R(62, 53, 7f, 45) .long R(b1, 64, 77, e0), R(bb, 6b, ae, 84) .long R(fe, 81, a0, 1c), R(f9, 08, 2b, 94) .long R(70, 48, 68, 58), R(8f, 45, fd, 19) .long R(94, de, 6c, 87), R(52, 7b, f8, b7) .long R(ab, 73, d3, 23), R(72, 4b, 02, e2) .long R(e3, 1f, 8f, 57), R(66, 55, ab, 2a) .long R(b2, eb, 28, 07), R(2f, b5, c2, 03) .long R(86, c5, 7b, 9a), R(d3, 37, 08, a5) .long R(30, 28, 87, f2), R(23, bf, a5, b2) .long R(02, 03, 6a, ba), R(ed, 16, 82, 5c) .long R(8a, cf, 1c, 2b), R(a7, 79, b4, 92) .long R(f3, 07, f2, f0), R(4e, 69, e2, a1) .long R(65, da, f4, cd), R(06, 05, be, d5) .long R(d1, 34, 62, 1f), R(c4, a6, fe, 8a) .long R(34, 2e, 53, 9d), R(a2, f3, 55, a0) .long R(05, 8a, e1, 32), R(a4, f6, eb, 75) .long R(0b, 83, ec, 39), R(40, 60, ef, aa) .long R(5e, 71, 9f, 06), R(bd, 6e, 10, 51) .long R(3e, 21, 8a, f9), R(96, dd, 06, 3d) .long R(dd, 3e, 05, ae), R(4d, e6, bd, 46) .long R(91, 54, 8d, b5), R(71, c4, 5d, 05) .long R(04, 06, d4, 6f), R(60, 50, 15, ff) .long R(19, 98, fb, 24), R(d6, bd, e9, 97) .long R(89, 40, 43, cc), R(67, d9, 9e, 77) .long R(b0, e8, 42, bd), R(07, 89, 8b, 88) .long R(e7, 19, 5b, 38), R(79, c8, ee, db) .long R(a1, 7c, 0a, 47), R(7c, 42, 0f, e9) .long R(f8, 84, 1e, c9), R(00, 00, 00, 00) .long R(09, 80, 86, 83), R(32, 2b, ed, 48) .long R(1e, 11, 70, ac), R(6c, 5a, 72, 4e) .long R(fd, 0e, ff, fb), R(0f, 85, 38, 56) .long R(3d, ae, d5, 1e), R(36, 2d, 39, 27) .long R(0a, 0f, d9, 64), R(68, 5c, a6, 21) .long R(9b, 5b, 54, d1), R(24, 36, 2e, 3a) .long R(0c, 0a, 67, b1), R(93, 57, e7, 0f) .long R(b4, ee, 96, d2), R(1b, 9b, 91, 9e) .long R(80, c0, c5, 4f), R(61, dc, 20, a2) .long R(5a, 77, 4b, 69), R(1c, 12, 1a, 16) .long R(e2, 93, ba, 0a), R(c0, a0, 2a, e5) .long R(3c, 22, e0, 43), R(12, 1b, 17, 1d) .long R(0e, 09, 0d, 0b), R(f2, 8b, c7, ad) .long R(2d, b6, a8, b9), R(14, 1e, a9, c8) .long R(57, f1, 19, 85), R(af, 75, 07, 4c) .long R(ee, 99, dd, bb), R(a3, 7f, 60, fd) .long R(f7, 01, 26, 9f), R(5c, 72, f5, bc) .long R(44, 66, 3b, c5), R(5b, fb, 7e, 34) .long R(8b, 43, 29, 76), R(cb, 23, c6, dc) .long R(b6, ed, fc, 68), R(b8, e4, f1, 63) .long R(d7, 31, dc, ca), R(42, 63, 85, 10) .long R(13, 97, 22, 40), R(84, c6, 11, 20) .long R(85, 4a, 24, 7d), R(d2, bb, 3d, f8) .long R(ae, f9, 32, 11), R(c7, 29, a1, 6d) .long R(1d, 9e, 2f, 4b), R(dc, b2, 30, f3) .long R(0d, 86, 52, ec), R(77, c1, e3, d0) .long R(2b, b3, 16, 6c), R(a9, 70, b9, 99) .long R(11, 94, 48, fa), R(47, e9, 64, 22) .long R(a8, fc, 8c, c4), R(a0, f0, 3f, 1a) .long R(56, 7d, 2c, d8), R(22, 33, 90, ef) .long R(87, 49, 4e, c7), R(d9, 38, d1, c1) .long R(8c, ca, a2, fe), R(98, d4, 0b, 36) .long R(a6, f5, 81, cf), R(a5, 7a, de, 28) .long R(da, b7, 8e, 26), R(3f, ad, bf, a4) .long R(2c, 3a, 9d, e4), R(50, 78, 92, 0d) .long R(6a, 5f, cc, 9b), R(54, 7e, 46, 62) .long R(f6, 8d, 13, c2), R(90, d8, b8, e8) .long R(2e, 39, f7, 5e), R(82, c3, af, f5) .long R(9f, 5d, 80, be), R(69, d0, 93, 7c) .long R(6f, d5, 
2d, a9), R(cf, 25, 12, b3) .long R(c8, ac, 99, 3b), R(10, 18, 7d, a7) .long R(e8, 9c, 63, 6e), R(db, 3b, bb, 7b) .long R(cd, 26, 78, 09), R(6e, 59, 18, f4) .long R(ec, 9a, b7, 01), R(83, 4f, 9a, a8) .long R(e6, 95, 6e, 65), R(aa, ff, e6, 7e) .long R(21, bc, cf, 08), R(ef, 15, e8, e6) .long R(ba, e7, 9b, d9), R(4a, 6f, 36, ce) .long R(ea, 9f, 09, d4), R(29, b0, 7c, d6) .long R(31, a4, b2, af), R(2a, 3f, 23, 31) .long R(c6, a5, 94, 30), R(35, a2, 66, c0) .long R(74, 4e, bc, 37), R(fc, 82, ca, a6) .long R(e0, 90, d0, b0), R(33, a7, d8, 15) .long R(f1, 04, 98, 4a), R(41, ec, da, f7) .long R(7f, cd, 50, 0e), R(17, 91, f6, 2f) .long R(76, 4d, d6, 8d), R(43, ef, b0, 4d) .long R(cc, aa, 4d, 54), R(e4, 96, 04, df) .long R(9e, d1, b5, e3), R(4c, 6a, 88, 1b) .long R(c1, 2c, 1f, b8), R(46, 65, 51, 7f) .long R(9d, 5e, ea, 04), R(01, 8c, 35, 5d) .long R(fa, 87, 74, 73), R(fb, 0b, 41, 2e) .long R(b3, 67, 1d, 5a), R(92, db, d2, 52) .long R(e9, 10, 56, 33), R(6d, d6, 47, 13) .long R(9a, d7, 61, 8c), R(37, a1, 0c, 7a) .long R(59, f8, 14, 8e), R(eb, 13, 3c, 89) .long R(ce, a9, 27, ee), R(b7, 61, c9, 35) .long R(e1, 1c, e5, ed), R(7a, 47, b1, 3c) .long R(9c, d2, df, 59), R(55, f2, 73, 3f) .long R(18, 14, ce, 79), R(73, c7, 37, bf) .long R(53, f7, cd, ea), R(5f, fd, aa, 5b) .long R(df, 3d, 6f, 14), R(78, 44, db, 86) .long R(ca, af, f3, 81), R(b9, 68, c4, 3e) .long R(38, 24, 34, 2c), R(c2, a3, 40, 5f) .long R(16, 1d, c3, 72), R(bc, e2, 25, 0c) .long R(28, 3c, 49, 8b), R(ff, 0d, 95, 41) .long R(39, a8, 01, 71), R(08, 0c, b3, de) .long R(d8, b4, e4, 9c), R(64, 56, c1, 90) .long R(7b, cb, 84, 61), R(d5, 32, b6, 70) .long R(48, 6c, 5c, 74), R(d0, b8, 57, 42) .globl PPC_AES_4K_DECTAB2 PPC_AES_4K_DECTAB2: /* decryption table, same as crypto_il_tab in crypto/aes-generic.c */ .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87 .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2 .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16 .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92 .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06 .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02 .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73 .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85 .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89 .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20 .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4 .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31 .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0 .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26 .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
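The two decryption tables above fold InvSubBytes and InvMixColumns into single lookups: each 32-bit word of PPC_AES_4K_DECTAB packs the four GF(2^8) multiples {0e, 09, 0d, 0b} of one inverse S-box output, while PPC_AES_4K_DECTAB2 holds the plain inverse S-box for the final round. The following C sketch cross-checks the first entry; it is illustrative only (gmul() is a local helper written for this check, not a kernel API), under the assumption stated in the comments that the tables match crypto_it_tab/crypto_il_tab.

#include <stdint.h>
#include <stdio.h>

/* GF(2^8) multiply modulo the AES polynomial x^8+x^4+x^3+x+1 (0x11b) */
static uint8_t gmul(uint8_t a, uint8_t b)
{
        uint8_t p = 0;

        while (b) {
                if (b & 1)
                        p ^= a;
                a = (a << 1) ^ ((a & 0x80) ? 0x1b : 0);
                b >>= 1;
        }
        return p;
}

int main(void)
{
        uint8_t s = 0x52; /* inv_sbox[0], the first .byte of PPC_AES_4K_DECTAB2 */

        /* expect "51 f4 a7 50", i.e. the first entry R(51, f4, a7, 50) */
        printf("%02x %02x %02x %02x\n",
               gmul(s, 0x0e), gmul(s, 0x09), gmul(s, 0x0d), gmul(s, 0x0b));
        return 0;
}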
aixcc-public/challenge-001-exemplar-source
27,722
arch/powerpc/crypto/crc32c-vpmsum_asm.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Calculate a crc32c with vpmsum acceleration * * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM */ .section .rodata .balign 16 .byteswap_constant: /* byte reverse permute constant */ .octa 0x0F0E0D0C0B0A09080706050403020100 .constants: /* Reduce 262144 kbits to 1024 bits */ /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */ .octa 0x00000000b6ca9e20000000009c37c408 /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */ .octa 0x00000000350249a800000001b51df26c /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */ .octa 0x00000001862dac54000000000724b9d0 /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */ .octa 0x00000001d87fb48c00000001c00532fe /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */ .octa 0x00000001f39b699e00000000f05a9362 /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */ .octa 0x0000000101da11b400000001e1007970 /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */ .octa 0x00000001cab571e000000000a57366ee /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */ .octa 0x00000000c7020cfe0000000192011284 /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */ .octa 0x00000000cdaed1ae0000000162716d9a /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */ .octa 0x00000001e804effc00000000cd97ecde /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */ .octa 0x0000000077c3ea3a0000000058812bc0 /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */ .octa 0x0000000068df31b40000000088b8c12e /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */ .octa 0x00000000b059b6c200000001230b234c /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */ .octa 0x0000000145fb8ed800000001120b416e /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */ .octa 0x00000000cbc0916800000001974aecb0 /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */ .octa 0x000000005ceeedc2000000008ee3f226 /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */ .octa 0x0000000047d74e8600000001089aba9a /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */ .octa 0x00000001407e9e220000000065113872 /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */ .octa 0x00000001da967bda000000005c07ec10 /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */ .octa 0x000000006c8983680000000187590924 /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */ .octa 0x00000000f2d14c9800000000e35da7c6 /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */ .octa 0x00000001993c6ad4000000000415855a /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */ .octa 0x000000014683d1ac0000000073617758 /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */ .octa 0x00000001a7c93e6c0000000176021d28 /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */ .octa 0x000000010211e90a00000001c358fd0a /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */ .octa 0x000000001119403e00000001ff7a2c18 /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */ .octa 0x000000001c3261aa00000000f2d9f7e4 /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */ .octa 0x000000014e37a634000000016cf1f9c8 /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */ .octa 0x0000000073786c0c000000010af9279a /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */ .octa 0x000000011dc037f80000000004f101e8 /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */ .octa 0x0000000031433dfc0000000070bcf184 /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */ .octa 0x000000009cde8348000000000a8de642 /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */ .octa 0x0000000038d3c2a60000000062ea130c /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` 
<< 1 */ .octa 0x000000011b25f26000000001eb31cbb2 /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */ .octa 0x000000001629e6f00000000170783448 /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */ .octa 0x0000000160838b4c00000001a684b4c6 /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */ .octa 0x000000007a44011c00000000253ca5b4 /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */ .octa 0x00000000226f417a0000000057b4b1e2 /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */ .octa 0x0000000045eb2eb400000000b6bd084c /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */ .octa 0x000000014459d70c0000000123c2d592 /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */ .octa 0x00000001d406ed8200000000159dafce /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */ .octa 0x0000000160c8e1a80000000127e1a64e /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */ .octa 0x0000000027ba80980000000056860754 /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */ .octa 0x000000006d92d01800000001e661aae8 /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */ .octa 0x000000012ed7e3f200000000f82c6166 /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */ .octa 0x000000002dc8778800000000c4f9c7ae /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */ .octa 0x0000000018240bb80000000074203d20 /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */ .octa 0x000000001ad381580000000198173052 /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */ .octa 0x00000001396b78f200000001ce8aba54 /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */ .octa 0x000000011a68133400000001850d5d94 /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */ .octa 0x000000012104732e00000001d609239c /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */ .octa 0x00000000a140d90c000000001595f048 /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */ .octa 0x00000001b7215eda0000000042ccee08 /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */ .octa 0x00000001aaf1df3c000000010a389d74 /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */ .octa 0x0000000029d15b8a000000012a840da6 /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */ .octa 0x00000000f1a96922000000001d181c0c /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */ .octa 0x00000001ac80d03c0000000068b7d1f6 /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */ .octa 0x000000000f11d56a000000005b0f14fc /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */ .octa 0x00000001f1c022a20000000179e9e730 /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */ .octa 0x0000000173d00ae200000001ce1368d6 /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */ .octa 0x00000001d4ffe4ac0000000112c3a84c /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */ .octa 0x000000016edc5ae400000000de940fee /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */ .octa 0x00000001f1a0214000000000fe896b7e /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */ .octa 0x00000000ca0b28a000000001f797431c /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */ .octa 0x00000001928e30a20000000053e989ba /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */ .octa 0x0000000097b1b002000000003920cd16 /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */ .octa 0x00000000b15bf90600000001e6f579b8 /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */ .octa 0x00000000411c5d52000000007493cb0a /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */ .octa 0x00000001c36f330000000001bdd376d8 /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */ .octa 0x00000001119227e0000000016badfee6 /* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 
1 */ .octa 0x00000000114d47020000000071de5c58 /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */ .octa 0x00000000458b5b9800000000453f317c /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */ .octa 0x000000012e31fb8e0000000121675cce /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */ .octa 0x000000005cf619d800000001f409ee92 /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */ .octa 0x0000000063f4d8b200000000f36b9c88 /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */ .octa 0x000000004138dc8a0000000036b398f4 /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */ .octa 0x00000001d29ee8e000000001748f9adc /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */ .octa 0x000000006a08ace800000001be94ec00 /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */ .octa 0x0000000127d4201000000000b74370d6 /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */ .octa 0x0000000019d76b6200000001174d0b98 /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */ .octa 0x00000001b1471f6e00000000befc06a4 /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */ .octa 0x00000001f64c19cc00000001ae125288 /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */ .octa 0x00000000003c0ea00000000095c19b34 /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */ .octa 0x000000014d73abf600000001a78496f2 /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */ .octa 0x00000001620eb84400000001ac5390a0 /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */ .octa 0x0000000147655048000000002a80ed6e /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */ .octa 0x0000000067b5077e00000001fa9b0128 /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */ .octa 0x0000000010ffe20600000001ea94929e /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */ .octa 0x000000000fee8f1e0000000125f4305c /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */ .octa 0x00000001da26fbae00000001471e2002 /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */ .octa 0x00000001b3a8bd880000000132d2253a /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */ .octa 0x00000000e8f3898e00000000f26b3592 /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */ .octa 0x00000000b0d0d28c00000000bc8b67b0 /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */ .octa 0x0000000030f2a798000000013a826ef2 /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */ .octa 0x000000000fba10020000000081482c84 /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */ .octa 0x00000000bdb9bd7200000000e77307c2 /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */ .octa 0x0000000075d3bf5a00000000d4a07ec8 /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */ .octa 0x00000000ef1f98a00000000017102100 /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */ .octa 0x00000000689c760200000000db406486 /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */ .octa 0x000000016d5fa5fe0000000192db7f88 /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */ .octa 0x00000001d0d2b9ca000000018bf67b1e /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */ .octa 0x0000000041e7b470000000007c09163e /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */ .octa 0x00000001cbb6495e000000000adac060 /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */ .octa 0x000000010052a0b000000000bd8316ae /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */ .octa 0x00000001d8effb5c000000019f09ab54 /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */ .octa 0x00000001d969853c0000000125155542 /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */ .octa 0x00000000523ccce2000000018fdb5882 /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 
*/ .octa 0x000000001e2436bc00000000e794b3f4 /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */ .octa 0x00000000ddd1c3a2000000016f9bb022 /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */ .octa 0x0000000019fcfe3800000000290c9978 /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */ .octa 0x00000001ce95db640000000083c0f350 /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */ .octa 0x00000000af5828060000000173ea6628 /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */ .octa 0x00000001006388f600000001c8b4e00a /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */ .octa 0x0000000179eca00a00000000de95d6aa /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */ .octa 0x0000000122410a6a000000010b7f7248 /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */ .octa 0x000000004288e87c00000001326e3a06 /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */ .octa 0x000000016c5490da00000000bb62c2e6 /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */ .octa 0x00000000d1c71f6e0000000156a4b2c2 /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */ .octa 0x00000001b4ce08a6000000011dfe763a /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */ .octa 0x00000001466ba60c000000007bcca8e2 /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */ .octa 0x00000001f6c488a40000000186118faa /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */ .octa 0x000000013bfb06820000000111a65a88 /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */ .octa 0x00000000690e9e54000000003565e1c4 /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */ .octa 0x00000000281346b6000000012ed02a82 /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */ .octa 0x000000015646402400000000c486ecfc /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */ .octa 0x000000016063a8dc0000000001b951b2 /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */ .octa 0x0000000116a663620000000048143916 /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */ .octa 0x000000017e8aa4d200000001dc2ae124 /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */ .octa 0x00000001728eb10c00000001416c58d6 /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */ .octa 0x00000001b08fd7fa00000000a479744a /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */ .octa 0x00000001092a16e80000000096ca3a26 /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */ .octa 0x00000000a505637c00000000ff223d4e /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */ .octa 0x00000000d94869b2000000010e84da42 /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */ .octa 0x00000001c8b203ae00000001b61ba3d0 /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */ .octa 0x000000005704aea000000000680f2de8 /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */ .octa 0x000000012e295fa2000000008772a9a8 /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */ .octa 0x000000011d0908bc0000000155f295bc /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */ .octa 0x0000000193ed97ea00000000595f9282 /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */ .octa 0x000000013a0f1c520000000164b1c25a /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */ .octa 0x000000010c2c40c000000000fbd67c50 /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */ .octa 0x00000000ff6fac3e0000000096076268 /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */ .octa 0x000000017b3609c000000001d288e4cc /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */ .octa 0x0000000088c8c92200000001eaac1bdc /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */ .octa 0x00000001751baae600000001f1ea39e2 /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */ 
.octa 0x000000010795297200000001eb6506fc /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */ .octa 0x0000000162b00abe000000010f806ffe /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */ .octa 0x000000000d7b404c000000010408481e /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */ .octa 0x00000000763b13d40000000188260534 /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */ .octa 0x00000000f6dc22d80000000058fc73e0 /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */ .octa 0x000000007daae06000000000391c59b8 /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */ .octa 0x000000013359ab7c000000018b638400 /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */ .octa 0x000000008add438a000000011738f5c4 /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */ .octa 0x00000001edbefdea000000008cf7c6da /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */ .octa 0x000000004104e0f800000001ef97fb16 /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */ .octa 0x00000000b48a82220000000102130e20 /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */ .octa 0x00000001bcb4684400000000db968898 /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */ .octa 0x000000013293ce0a00000000b5047b5e /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */ .octa 0x00000001710d0844000000010b90fdb2 /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */ .octa 0x0000000117907f6e000000004834a32e /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */ .octa 0x0000000087ddf93e0000000059c8f2b0 /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */ .octa 0x000000005970e9b00000000122cec508 /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */ .octa 0x0000000185b2b7d0000000000a330cda /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */ .octa 0x00000001dcee0efc000000014a47148c /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */ .octa 0x0000000030da27220000000042c61cb8 /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */ .octa 0x000000012f925a180000000012fe6960 /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */ .octa 0x00000000dd2e357c00000000dbda2c20 /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */ .octa 0x00000000071c80de000000011122410c /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */ .octa 0x000000011513140a00000000977b2070 /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */ .octa 0x00000001df876e8e000000014050438e /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */ .octa 0x000000015f81d6ce0000000147c840e8 /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */ .octa 0x000000019dd94dbe00000001cc7c88ce /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */ .octa 0x00000001373d206e00000001476b35a4 /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */ .octa 0x00000000668ccade000000013d52d508 /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */ .octa 0x00000001b192d268000000008e4be32e /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */ .octa 0x00000000e30f3a7800000000024120fe /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */ .octa 0x000000010ef1f7bc00000000ddecddb4 /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */ .octa 0x00000001f5ac738000000000d4d403bc /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */ .octa 0x000000011822ea7000000001734b89aa /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */ .octa 0x00000000c3a33848000000010e7a58d6 /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */ .octa 0x00000001bd151c2400000001f9f04e9c /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */ .octa 0x0000000056002d7600000000b692225e /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */ .octa 0x000000014657c4f4000000019b8d3f3e /* 
x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */ .octa 0x0000000113742d7c00000001a874f11e /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */ .octa 0x000000019c5920ba000000010d5a4254 /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */ .octa 0x000000005216d2d600000000bbb2f5d6 /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */ .octa 0x0000000136f5ad8a0000000179cc0e36 /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */ .octa 0x000000018b07beb600000001dca1da4a /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */ .octa 0x00000000db1e93b000000000feb1a192 /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */ .octa 0x000000000b96fa3a00000000d1eeedd6 /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */ .octa 0x00000001d9968af0000000008fad9bb4 /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */ .octa 0x000000000e4a77a200000001884938e4 /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */ .octa 0x00000000508c2ac800000001bc2e9bc0 /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */ .octa 0x0000000021572a8000000001f9658a68 /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */ .octa 0x00000001b859daf2000000001b9224fc /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */ .octa 0x000000016f7884740000000055b2fb84 /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */ .octa 0x00000001b438810e000000018b090348 /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */ .octa 0x0000000095ddc6f2000000011ccbd5ea /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */ .octa 0x00000001d977c20c0000000007ae47f8 /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */ .octa 0x00000000ebedb99a0000000172acbec0 /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */ .octa 0x00000001df9e9e9200000001c6e3ff20 /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */ .octa 0x00000001a4a3f95200000000e1b38744 /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */ .octa 0x00000000e2f5122000000000791585b2 /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */ .octa 0x000000004aa01f3e00000000ac53b894 /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */ .octa 0x00000000b3e90a5800000001ed5f2cf4 /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */ .octa 0x000000000c9ca2aa00000001df48b2e0 /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */ .octa 0x000000015168231600000000049c1c62 /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */ .octa 0x0000000036fce78c000000017c460c12 /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */ .octa 0x000000009037dc10000000015be4da7e /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */ .octa 0x00000000d3298582000000010f38f668 /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */ .octa 0x00000001b42e8ad60000000039f40a00 /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */ .octa 0x00000000142a983800000000bd4c10c4 /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */ .octa 0x0000000109c7f1900000000042db1d98 /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */ .octa 0x0000000056ff931000000001c905bae6 /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */ .octa 0x00000001594513aa00000000069d40ea /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */ .octa 0x00000001e3b5b1e8000000008e4fbad0 /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */ .octa 0x000000011dd5fc080000000047bedd46 /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */ .octa 0x00000001675f0cc20000000026396bf8 /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */ .octa 0x00000000d1c8dd4400000000379beb92 /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */ .octa 0x0000000115ebd3d8000000000abae54a /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */ .octa 
0x00000001ecbd0dac0000000007e6a128 /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */ .octa 0x00000000cdf67af2000000000ade29d2 /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */ .octa 0x000000004c01ff4c00000000f974c45c /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */ .octa 0x00000000f2d8657e00000000e77ac60a /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */ .octa 0x000000006bae74c40000000145895816 /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */ .octa 0x0000000152af8aa00000000038e362be /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */ .octa 0x0000000004663802000000007f991a64 /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */ .octa 0x00000001ab2f5afc00000000fa366d3a /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */ .octa 0x0000000074a4ebd400000001a2bb34f0 /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */ .octa 0x00000001d7ab3a4c0000000028a9981e /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */ .octa 0x00000001a8da60c600000001dbc672be /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */ .octa 0x000000013cf6382000000000b04d77f6 /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */ .octa 0x00000000bec12e1e0000000124400d96 /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */ .octa 0x00000001c6368010000000014ca4b414 /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */ .octa 0x00000001e6e78758000000012fe2c938 /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */ .octa 0x000000008d7f2b3c00000001faed01e6 /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */ .octa 0x000000016b4a156e000000007e80ecfe /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */ .octa 0x00000001c63cfeb60000000098daee94 /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */ .octa 0x000000015f902670000000010a04edea /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */ .octa 0x00000001cd5de11e00000001c00b4524 /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */ .octa 0x000000001acaec540000000170296550 /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */ .octa 0x000000002bd0ca780000000181afaa48 /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */ .octa 0x0000000032d63d5c0000000185a31ffa /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */ .octa 0x000000001c6d4e4c000000002469f608 /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */ .octa 0x0000000106a60b92000000006980102a /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */ .octa 0x00000000d3855e120000000111ea9ca8 /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */ .octa 0x00000000e312563600000001bd1d29ce /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */ .octa 0x000000009e8f7ea400000001b34b9580 /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */ .octa 0x00000001c82e562c000000003076054e /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */ .octa 0x00000000ca9f09ce000000012a608ea4 /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */ .octa 0x00000000c63764e600000000784d05fe /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */ .octa 0x0000000168d2e49e000000016ef0d82a /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */ .octa 0x00000000e986c1480000000075bda454 /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */ .octa 0x00000000cfb65894000000003dc0a1c4 /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */ .octa 0x0000000111cadee400000000e9a5d8be /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */ .octa 0x0000000171fb63ce00000001609bc4b4 .short_constants: /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */ /* x^1952 mod p(x)`, x^1984 mod p(x)`, x^2016 mod p(x)`, x^2048 mod p(x)` */ .octa 0x7fec2963e5bf80485cf015c388e56f72 /* 
x^1824 mod p(x)`, x^1856 mod p(x)`, x^1888 mod p(x)`, x^1920 mod p(x)` */ .octa 0x38e888d4844752a9963a18920246e2e6 /* x^1696 mod p(x)`, x^1728 mod p(x)`, x^1760 mod p(x)`, x^1792 mod p(x)` */ .octa 0x42316c00730206ad419a441956993a31 /* x^1568 mod p(x)`, x^1600 mod p(x)`, x^1632 mod p(x)`, x^1664 mod p(x)` */ .octa 0x543d5c543e65ddf9924752ba2b830011 /* x^1440 mod p(x)`, x^1472 mod p(x)`, x^1504 mod p(x)`, x^1536 mod p(x)` */ .octa 0x78e87aaf56767c9255bd7f9518e4a304 /* x^1312 mod p(x)`, x^1344 mod p(x)`, x^1376 mod p(x)`, x^1408 mod p(x)` */ .octa 0x8f68fcec1903da7f6d76739fe0553f1e /* x^1184 mod p(x)`, x^1216 mod p(x)`, x^1248 mod p(x)`, x^1280 mod p(x)` */ .octa 0x3f4840246791d588c133722b1fe0b5c3 /* x^1056 mod p(x)`, x^1088 mod p(x)`, x^1120 mod p(x)`, x^1152 mod p(x)` */ .octa 0x34c96751b04de25a64b67ee0e55ef1f3 /* x^928 mod p(x)`, x^960 mod p(x)`, x^992 mod p(x)`, x^1024 mod p(x)` */ .octa 0x156c8e180b4a395b069db049b8fdb1e7 /* x^800 mod p(x)`, x^832 mod p(x)`, x^864 mod p(x)`, x^896 mod p(x)` */ .octa 0xe0b99ccbe661f7bea11bfaf3c9e90b9e /* x^672 mod p(x)`, x^704 mod p(x)`, x^736 mod p(x)`, x^768 mod p(x)` */ .octa 0x041d37768cd75659817cdc5119b29a35 /* x^544 mod p(x)`, x^576 mod p(x)`, x^608 mod p(x)`, x^640 mod p(x)` */ .octa 0x3a0777818cfaa9651ce9d94b36c41f1c /* x^416 mod p(x)`, x^448 mod p(x)`, x^480 mod p(x)`, x^512 mod p(x)` */ .octa 0x0e148e8252377a554f256efcb82be955 /* x^288 mod p(x)`, x^320 mod p(x)`, x^352 mod p(x)`, x^384 mod p(x)` */ .octa 0x9c25531d19e65ddeec1631edb2dea967 /* x^160 mod p(x)`, x^192 mod p(x)`, x^224 mod p(x)`, x^256 mod p(x)` */ .octa 0x790606ff9957c0a65d27e147510ac59a /* x^32 mod p(x)`, x^64 mod p(x)`, x^96 mod p(x)`, x^128 mod p(x)` */ .octa 0x82f63b786ea2d55ca66805eb18b8ea18 .barrett_constants: /* 33 bit reflected Barrett constant m - (4^32)/n */ .octa 0x000000000000000000000000dea713f1 /* x^64 div p(x)` */ /* 33 bit reflected Barrett constant n */ .octa 0x00000000000000000000000105ec76f1 #define CRC_FUNCTION_NAME __crc32c_vpmsum #define REFLECT #include "crc32-vpmsum_core.S"
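The .constants block lists x^N mod p(x) fold multipliers for the reflected CRC32C polynomial (the ` marks bit-reflected values), and .barrett_constants drives the final 64-to-32-bit reduction. Note that 0x82f63b78, the bit-reflection of the CRC32C polynomial 0x1edc6f41, is visible as the leading x^32 word of the last .short_constants entry. As a way to cross-check the accelerated result, a plain bitwise implementation with that reflected polynomial reproduces the standard CRC-32C check value. This is an illustrative sketch only; crc32c_ref() is a local helper, not the vpmsum algorithm or a kernel function.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bit-at-a-time reflected CRC32C: polynomial 0x1edc6f41, reflected 0x82f63b78 */
static uint32_t crc32c_ref(uint32_t crc, const unsigned char *p, size_t len)
{
        crc = ~crc;
        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
        }
        return ~crc;
}

int main(void)
{
        const unsigned char s[] = "123456789";

        /* expect e3069283, the standard CRC-32C check value */
        printf("%08x\n", crc32c_ref(0, s, strlen((const char *)s)));
        return 0;
}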
aixcc-public/challenge-001-exemplar-source
7,412
arch/powerpc/crypto/md5-asm.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Fast MD5 implementation for PPC
 *
 * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
 */
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>

#define rHP     r3
#define rWP     r4

#define rH0     r0
#define rH1     r6
#define rH2     r7
#define rH3     r5

#define rW00    r8
#define rW01    r9
#define rW02    r10
#define rW03    r11
#define rW04    r12
#define rW05    r14
#define rW06    r15
#define rW07    r16
#define rW08    r17
#define rW09    r18
#define rW10    r19
#define rW11    r20
#define rW12    r21
#define rW13    r22
#define rW14    r23
#define rW15    r24

#define rT0     r25
#define rT1     r26

#define INITIALIZE \
        PPC_STLU r1,-INT_FRAME_SIZE(r1); \
        SAVE_GPRS(14, 26, r1)           /* push registers onto stack */

#define FINALIZE \
        REST_GPRS(14, 26, r1);          /* pop registers from stack */ \
        addi    r1,r1,INT_FRAME_SIZE

#ifdef __BIG_ENDIAN__
#define LOAD_DATA(reg, off) \
        lwbrx   reg,0,rWP;              /* load data */
#define INC_PTR \
        addi    rWP,rWP,4;              /* increment per word */
#define NEXT_BLOCK                      /* nothing to do */
#else
#define LOAD_DATA(reg, off) \
        lwz     reg,off(rWP);           /* load data */
#define INC_PTR                         /* nothing to do */
#define NEXT_BLOCK \
        addi    rWP,rWP,64;             /* increment per block */
#endif

#define R_00_15(a, b, c, d, w0, w1, p, q, off, k0h, k0l, k1h, k1l) \
        LOAD_DATA(w0, off)              /* W */ \
        and     rT0,b,c;                /* 1: f = b and c */ \
        INC_PTR                         /* ptr++ */ \
        andc    rT1,d,b;                /* 1: f' = ~b and d */ \
        LOAD_DATA(w1, off+4)            /* W */ \
        or      rT0,rT0,rT1;            /* 1: f = f or f' */ \
        addi    w0,w0,k0l;              /* 1: wk = w + k */ \
        add     a,a,rT0;                /* 1: a = a + f */ \
        addis   w0,w0,k0h;              /* 1: wk = w + k' */ \
        addis   w1,w1,k1h;              /* 2: wk = w + k */ \
        add     a,a,w0;                 /* 1: a = a + wk */ \
        addi    w1,w1,k1l;              /* 2: wk = w + k' */ \
        rotrwi  a,a,p;                  /* 1: a = a rotl x */ \
        add     d,d,w1;                 /* 2: a = a + wk */ \
        add     a,a,b;                  /* 1: a = a + b */ \
        and     rT0,a,b;                /* 2: f = b and c */ \
        andc    rT1,c,a;                /* 2: f' = ~b and d */ \
        or      rT0,rT0,rT1;            /* 2: f = f or f' */ \
        add     d,d,rT0;                /* 2: a = a + f */ \
        INC_PTR                         /* ptr++ */ \
        rotrwi  d,d,q;                  /* 2: a = a rotl x */ \
        add     d,d,a;                  /* 2: a = a + b */

#define R_16_31(a, b, c, d, w0, w1, p, q, k0h, k0l, k1h, k1l) \
        andc    rT0,c,d;                /* 1: f = c and ~d */ \
        and     rT1,b,d;                /* 1: f' = b and d */ \
        addi    w0,w0,k0l;              /* 1: wk = w + k */ \
        or      rT0,rT0,rT1;            /* 1: f = f or f' */ \
        addis   w0,w0,k0h;              /* 1: wk = w + k' */ \
        add     a,a,rT0;                /* 1: a = a + f */ \
        addi    w1,w1,k1l;              /* 2: wk = w + k */ \
        add     a,a,w0;                 /* 1: a = a + wk */ \
        addis   w1,w1,k1h;              /* 2: wk = w + k' */ \
        andc    rT0,b,c;                /* 2: f = c and ~d */ \
        rotrwi  a,a,p;                  /* 1: a = a rotl x */ \
        add     a,a,b;                  /* 1: a = a + b */ \
        add     d,d,w1;                 /* 2: a = a + wk */ \
        and     rT1,a,c;                /* 2: f' = b and d */ \
        or      rT0,rT0,rT1;            /* 2: f = f or f' */ \
        add     d,d,rT0;                /* 2: a = a + f */ \
        rotrwi  d,d,q;                  /* 2: a = a rotl x */ \
        add     d,d,a;                  /* 2: a = a + b */

#define R_32_47(a, b, c, d, w0, w1, p, q, k0h, k0l, k1h, k1l) \
        xor     rT0,b,c;                /* 1: f' = b xor c */ \
        addi    w0,w0,k0l;              /* 1: wk = w + k */ \
        xor     rT1,rT0,d;              /* 1: f = f xor f' */ \
        addis   w0,w0,k0h;              /* 1: wk = w + k' */ \
        add     a,a,rT1;                /* 1: a = a + f */ \
        addi    w1,w1,k1l;              /* 2: wk = w + k */ \
        add     a,a,w0;                 /* 1: a = a + wk */ \
        addis   w1,w1,k1h;              /* 2: wk = w + k' */ \
        rotrwi  a,a,p;                  /* 1: a = a rotl x */ \
        add     d,d,w1;                 /* 2: a = a + wk */ \
        add     a,a,b;                  /* 1: a = a + b */ \
        xor     rT1,rT0,a;              /* 2: f = b xor f' */ \
        add     d,d,rT1;                /* 2: a = a + f */ \
        rotrwi  d,d,q;                  /* 2: a = a rotl x */ \
        add     d,d,a;                  /* 2: a = a + b */

#define R_48_63(a, b, c, d, w0, w1, p, q, k0h, k0l, k1h, k1l) \
        addi    w0,w0,k0l;              /* 1: w = w + k */ \
        orc     rT0,b,d;                /* 1: f = b or ~d */ \
        addis   w0,w0,k0h;              /* 1: w = w + k' */ \
        xor     rT0,rT0,c;              /* 1: f = f xor c */ \
        add     a,a,w0;                 /* 1: a = a + wk */ \
        addi    w1,w1,k1l;              /* 2: w = w + k */ \
        add     a,a,rT0;                /* 1: a = a + f */ \
        addis   w1,w1,k1h;              /* 2: w = w + k' */ \
        rotrwi  a,a,p;                  /* 1: a = a rotl x */ \
        add     a,a,b;                  /* 1: a = a + b */ \
        orc     rT0,a,c;                /* 2: f = b or ~d */ \
        add     d,d,w1;                 /* 2: a = a + wk */ \
        xor     rT0,rT0,b;              /* 2: f = f xor c */ \
        add     d,d,rT0;                /* 2: a = a + f */ \
        rotrwi  d,d,q;                  /* 2: a = a rotl x */ \
        add     d,d,a;                  /* 2: a = a + b */

_GLOBAL(ppc_md5_transform)
        INITIALIZE

        mtctr   r5
        lwz     rH0,0(rHP)
        lwz     rH1,4(rHP)
        lwz     rH2,8(rHP)
        lwz     rH3,12(rHP)

ppc_md5_main:
        R_00_15(rH0, rH1, rH2, rH3, rW00, rW01, 25, 20, 0, 0xd76b, -23432, 0xe8c8, -18602)
        R_00_15(rH2, rH3, rH0, rH1, rW02, rW03, 15, 10, 8, 0x2420, 0x70db, 0xc1be, -12562)
        R_00_15(rH0, rH1, rH2, rH3, rW04, rW05, 25, 20, 16, 0xf57c, 0x0faf, 0x4788, -14806)
        R_00_15(rH2, rH3, rH0, rH1, rW06, rW07, 15, 10, 24, 0xa830, 0x4613, 0xfd47, -27391)
        R_00_15(rH0, rH1, rH2, rH3, rW08, rW09, 25, 20, 32, 0x6981, -26408, 0x8b45, -2129)
        R_00_15(rH2, rH3, rH0, rH1, rW10, rW11, 15, 10, 40, 0xffff, 0x5bb1, 0x895d, -10306)
        R_00_15(rH0, rH1, rH2, rH3, rW12, rW13, 25, 20, 48, 0x6b90, 0x1122, 0xfd98, 0x7193)
        R_00_15(rH2, rH3, rH0, rH1, rW14, rW15, 15, 10, 56, 0xa679, 0x438e, 0x49b4, 0x0821)

        R_16_31(rH0, rH1, rH2, rH3, rW01, rW06, 27, 23, 0x0d56, 0x6e0c, 0x1810, 0x6d2d)
        R_16_31(rH2, rH3, rH0, rH1, rW11, rW00, 18, 12, 0x9d02, -32109, 0x124c, 0x2332)
        R_16_31(rH0, rH1, rH2, rH3, rW05, rW10, 27, 23, 0x8ea7, 0x4a33, 0x0245, -18270)
        R_16_31(rH2, rH3, rH0, rH1, rW15, rW04, 18, 12, 0x8eee, -8608, 0xf258, -5095)
        R_16_31(rH0, rH1, rH2, rH3, rW09, rW14, 27, 23, 0x969d, -10697, 0x1cbe, -15288)
        R_16_31(rH2, rH3, rH0, rH1, rW03, rW08, 18, 12, 0x3317, 0x3e99, 0xdbd9, 0x7c15)
        R_16_31(rH0, rH1, rH2, rH3, rW13, rW02, 27, 23, 0xac4b, 0x7772, 0xd8cf, 0x331d)
        R_16_31(rH2, rH3, rH0, rH1, rW07, rW12, 18, 12, 0x6a28, 0x6dd8, 0x219a, 0x3b68)

        R_32_47(rH0, rH1, rH2, rH3, rW05, rW08, 28, 21, 0x29cb, 0x28e5, 0x4218, -7788)
        R_32_47(rH2, rH3, rH0, rH1, rW11, rW14, 16, 9, 0x473f, 0x06d1, 0x3aae, 0x3036)
        R_32_47(rH0, rH1, rH2, rH3, rW01, rW04, 28, 21, 0xaea1, -15134, 0x640b, -11295)
        R_32_47(rH2, rH3, rH0, rH1, rW07, rW10, 16, 9, 0x8f4c, 0x4887, 0xbc7c, -22499)
        R_32_47(rH0, rH1, rH2, rH3, rW13, rW00, 28, 21, 0x7eb8, -27199, 0x00ea, 0x6050)
        R_32_47(rH2, rH3, rH0, rH1, rW03, rW06, 16, 9, 0xe01a, 0x22fe, 0x4447, 0x69c5)
        R_32_47(rH0, rH1, rH2, rH3, rW09, rW12, 28, 21, 0xb7f3, 0x0253, 0x59b1, 0x4d5b)
        R_32_47(rH2, rH3, rH0, rH1, rW15, rW02, 16, 9, 0x4701, -27017, 0xc7bd, -19859)

        R_48_63(rH0, rH1, rH2, rH3, rW00, rW07, 26, 22, 0x0988, -1462, 0x4c70, -19401)
        R_48_63(rH2, rH3, rH0, rH1, rW14, rW05, 17, 11, 0xadaf, -5221, 0xfc99, 0x66f7)
        R_48_63(rH0, rH1, rH2, rH3, rW12, rW03, 26, 22, 0x7e80, -16418, 0xba1e, -25587)
        R_48_63(rH2, rH3, rH0, rH1, rW10, rW01, 17, 11, 0x4130, 0x380d, 0xe0c5, 0x738d)
        lwz     rW00,0(rHP)
        R_48_63(rH0, rH1, rH2, rH3, rW08, rW15, 26, 22, 0xe837, -30770, 0xde8a, 0x69e8)
        lwz     rW14,4(rHP)
        R_48_63(rH2, rH3, rH0, rH1, rW06, rW13, 17, 11, 0x9e79, 0x260f, 0x256d, -27941)
        lwz     rW12,8(rHP)
        R_48_63(rH0, rH1, rH2, rH3, rW04, rW11, 26, 22, 0xab75, -20775, 0x4f9e, -28397)
        lwz     rW10,12(rHP)
        R_48_63(rH2, rH3, rH0, rH1, rW02, rW09, 17, 11, 0x662b, 0x7c56, 0x11b2, 0x0358)

        add     rH0,rH0,rW00
        stw     rH0,0(rHP)
        add     rH1,rH1,rW14
        stw     rH1,4(rHP)
        add     rH2,rH2,rW12
        stw     rH2,8(rHP)
        add     rH3,rH3,rW10
        stw     rH3,12(rHP)

        NEXT_BLOCK

        bdnz    ppc_md5_main

        FINALIZE

        blr
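The round constants above are the standard MD5 T values, each split for an addis/addi pair: addi sign-extends its 16-bit immediate, so whenever the low half has its top bit set it is written as a negative number and the high half is pre-incremented by one to compensate. For instance, the pair (0xd76b, -23432) in the first R_00_15 call reassembles to 0xd76b0000 + 0xffffa478 = 0xd76aa478, the first MD5 constant. A small C sketch of the same arithmetic follows; combine() is a hypothetical helper written for illustration, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Mimic the PPC pair: addis adds hi << 16, addi adds a sign-extended lo */
static uint32_t combine(uint16_t hi, int16_t lo)
{
        return ((uint32_t)hi << 16) + (uint32_t)(int32_t)lo;
}

int main(void)
{
        /* first R_00_15 call: expect d76aa478 and e8c7b756 (MD5 T[1], T[2]) */
        printf("%08x\n", combine(0xd76b, -23432));
        printf("%08x\n", combine(0xe8c8, -18602));
        return 0;
}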
aixcc-public/challenge-001-exemplar-source
24,680
arch/powerpc/crypto/crct10dif-vpmsum_asm.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Calculate a CRC T10DIF with vpmsum acceleration * * Constants generated by crc32-vpmsum, available at * https://github.com/antonblanchard/crc32-vpmsum * * crc32-vpmsum is * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM */ .section .rodata .balign 16 .byteswap_constant: /* byte reverse permute constant */ .octa 0x0F0E0D0C0B0A09080706050403020100 .constants: /* Reduce 262144 kbits to 1024 bits */ /* x^261184 mod p(x), x^261120 mod p(x) */ .octa 0x0000000056d300000000000052550000 /* x^260160 mod p(x), x^260096 mod p(x) */ .octa 0x00000000ee67000000000000a1e40000 /* x^259136 mod p(x), x^259072 mod p(x) */ .octa 0x0000000060830000000000004ad10000 /* x^258112 mod p(x), x^258048 mod p(x) */ .octa 0x000000008cfe0000000000009ab40000 /* x^257088 mod p(x), x^257024 mod p(x) */ .octa 0x000000003e93000000000000fdb50000 /* x^256064 mod p(x), x^256000 mod p(x) */ .octa 0x000000003c2000000000000045480000 /* x^255040 mod p(x), x^254976 mod p(x) */ .octa 0x00000000b1fc0000000000008d690000 /* x^254016 mod p(x), x^253952 mod p(x) */ .octa 0x00000000f82b00000000000024ad0000 /* x^252992 mod p(x), x^252928 mod p(x) */ .octa 0x0000000044420000000000009f1a0000 /* x^251968 mod p(x), x^251904 mod p(x) */ .octa 0x00000000e88c00000000000066ec0000 /* x^250944 mod p(x), x^250880 mod p(x) */ .octa 0x00000000385c000000000000c87d0000 /* x^249920 mod p(x), x^249856 mod p(x) */ .octa 0x000000003227000000000000c8ff0000 /* x^248896 mod p(x), x^248832 mod p(x) */ .octa 0x00000000a9a900000000000033440000 /* x^247872 mod p(x), x^247808 mod p(x) */ .octa 0x00000000abaa00000000000066eb0000 /* x^246848 mod p(x), x^246784 mod p(x) */ .octa 0x000000001ac3000000000000c4ef0000 /* x^245824 mod p(x), x^245760 mod p(x) */ .octa 0x0000000063f000000000000056f30000 /* x^244800 mod p(x), x^244736 mod p(x) */ .octa 0x0000000032cc00000000000002050000 /* x^243776 mod p(x), x^243712 mod p(x) */ .octa 0x00000000f8b5000000000000568e0000 /* x^242752 mod p(x), x^242688 mod p(x) */ .octa 0x000000008db100000000000064290000 /* x^241728 mod p(x), x^241664 mod p(x) */ .octa 0x0000000059ca0000000000006b660000 /* x^240704 mod p(x), x^240640 mod p(x) */ .octa 0x000000005f5c00000000000018f80000 /* x^239680 mod p(x), x^239616 mod p(x) */ .octa 0x0000000061af000000000000b6090000 /* x^238656 mod p(x), x^238592 mod p(x) */ .octa 0x00000000e29e000000000000099a0000 /* x^237632 mod p(x), x^237568 mod p(x) */ .octa 0x000000000975000000000000a8360000 /* x^236608 mod p(x), x^236544 mod p(x) */ .octa 0x0000000043900000000000004f570000 /* x^235584 mod p(x), x^235520 mod p(x) */ .octa 0x00000000f9cd000000000000134c0000 /* x^234560 mod p(x), x^234496 mod p(x) */ .octa 0x000000007c29000000000000ec380000 /* x^233536 mod p(x), x^233472 mod p(x) */ .octa 0x000000004c6a000000000000b0d10000 /* x^232512 mod p(x), x^232448 mod p(x) */ .octa 0x00000000e7290000000000007d3e0000 /* x^231488 mod p(x), x^231424 mod p(x) */ .octa 0x00000000f1ab000000000000f0b20000 /* x^230464 mod p(x), x^230400 mod p(x) */ .octa 0x0000000039db0000000000009c270000 /* x^229440 mod p(x), x^229376 mod p(x) */ .octa 0x000000005e2800000000000092890000 /* x^228416 mod p(x), x^228352 mod p(x) */ .octa 0x00000000d44e000000000000d5ee0000 /* x^227392 mod p(x), x^227328 mod p(x) */ .octa 0x00000000cd0a00000000000041f50000 /* x^226368 mod p(x), x^226304 mod p(x) */ .octa 0x00000000c5b400000000000010520000 /* x^225344 mod p(x), x^225280 mod p(x) */ .octa 0x00000000fd2100000000000042170000 /* x^224320 mod p(x), x^224256 mod p(x) */ .octa 
0x000000002f2500000000000095c20000 /* x^223296 mod p(x), x^223232 mod p(x) */ .octa 0x000000001b0100000000000001ce0000 /* x^222272 mod p(x), x^222208 mod p(x) */ .octa 0x000000000d430000000000002aca0000 /* x^221248 mod p(x), x^221184 mod p(x) */ .octa 0x0000000030a6000000000000385e0000 /* x^220224 mod p(x), x^220160 mod p(x) */ .octa 0x00000000e37b0000000000006f7a0000 /* x^219200 mod p(x), x^219136 mod p(x) */ .octa 0x00000000873600000000000024320000 /* x^218176 mod p(x), x^218112 mod p(x) */ .octa 0x00000000e9fb000000000000bd9c0000 /* x^217152 mod p(x), x^217088 mod p(x) */ .octa 0x000000003b9500000000000054bc0000 /* x^216128 mod p(x), x^216064 mod p(x) */ .octa 0x00000000133e000000000000a4660000 /* x^215104 mod p(x), x^215040 mod p(x) */ .octa 0x00000000784500000000000079930000 /* x^214080 mod p(x), x^214016 mod p(x) */ .octa 0x00000000b9800000000000001bb80000 /* x^213056 mod p(x), x^212992 mod p(x) */ .octa 0x00000000687600000000000024400000 /* x^212032 mod p(x), x^211968 mod p(x) */ .octa 0x00000000aff300000000000029e10000 /* x^211008 mod p(x), x^210944 mod p(x) */ .octa 0x0000000024b50000000000005ded0000 /* x^209984 mod p(x), x^209920 mod p(x) */ .octa 0x0000000017e8000000000000b12e0000 /* x^208960 mod p(x), x^208896 mod p(x) */ .octa 0x00000000128400000000000026d20000 /* x^207936 mod p(x), x^207872 mod p(x) */ .octa 0x000000002115000000000000a32a0000 /* x^206912 mod p(x), x^206848 mod p(x) */ .octa 0x000000009595000000000000a1210000 /* x^205888 mod p(x), x^205824 mod p(x) */ .octa 0x00000000281e000000000000ee8b0000 /* x^204864 mod p(x), x^204800 mod p(x) */ .octa 0x0000000006010000000000003d0d0000 /* x^203840 mod p(x), x^203776 mod p(x) */ .octa 0x00000000e2b600000000000034e90000 /* x^202816 mod p(x), x^202752 mod p(x) */ .octa 0x000000001bd40000000000004cdb0000 /* x^201792 mod p(x), x^201728 mod p(x) */ .octa 0x00000000df2800000000000030e90000 /* x^200768 mod p(x), x^200704 mod p(x) */ .octa 0x0000000049c200000000000042590000 /* x^199744 mod p(x), x^199680 mod p(x) */ .octa 0x000000009b97000000000000df950000 /* x^198720 mod p(x), x^198656 mod p(x) */ .octa 0x000000006184000000000000da7b0000 /* x^197696 mod p(x), x^197632 mod p(x) */ .octa 0x00000000461700000000000012510000 /* x^196672 mod p(x), x^196608 mod p(x) */ .octa 0x000000009b40000000000000f37e0000 /* x^195648 mod p(x), x^195584 mod p(x) */ .octa 0x00000000eeb2000000000000ecf10000 /* x^194624 mod p(x), x^194560 mod p(x) */ .octa 0x00000000b2e800000000000050f20000 /* x^193600 mod p(x), x^193536 mod p(x) */ .octa 0x00000000f59a000000000000e0b30000 /* x^192576 mod p(x), x^192512 mod p(x) */ .octa 0x00000000467f0000000000004d5a0000 /* x^191552 mod p(x), x^191488 mod p(x) */ .octa 0x00000000da92000000000000bb010000 /* x^190528 mod p(x), x^190464 mod p(x) */ .octa 0x000000001e1000000000000022a40000 /* x^189504 mod p(x), x^189440 mod p(x) */ .octa 0x0000000058fe000000000000836f0000 /* x^188480 mod p(x), x^188416 mod p(x) */ .octa 0x00000000b9ce000000000000d78d0000 /* x^187456 mod p(x), x^187392 mod p(x) */ .octa 0x0000000022210000000000004f8d0000 /* x^186432 mod p(x), x^186368 mod p(x) */ .octa 0x00000000744600000000000033760000 /* x^185408 mod p(x), x^185344 mod p(x) */ .octa 0x000000001c2e000000000000a1e50000 /* x^184384 mod p(x), x^184320 mod p(x) */ .octa 0x00000000dcc8000000000000a1a40000 /* x^183360 mod p(x), x^183296 mod p(x) */ .octa 0x00000000910f00000000000019a20000 /* x^182336 mod p(x), x^182272 mod p(x) */ .octa 0x0000000055d5000000000000f6ae0000 /* x^181312 mod p(x), x^181248 mod p(x) */ .octa 
0x00000000c8ba000000000000a7ac0000 /* x^180288 mod p(x), x^180224 mod p(x) */ .octa 0x0000000031f8000000000000eea20000 /* x^179264 mod p(x), x^179200 mod p(x) */ .octa 0x000000001966000000000000c4d90000 /* x^178240 mod p(x), x^178176 mod p(x) */ .octa 0x00000000b9810000000000002b470000 /* x^177216 mod p(x), x^177152 mod p(x) */ .octa 0x000000008303000000000000f7cf0000 /* x^176192 mod p(x), x^176128 mod p(x) */ .octa 0x000000002ce500000000000035b30000 /* x^175168 mod p(x), x^175104 mod p(x) */ .octa 0x000000002fae0000000000000c7c0000 /* x^174144 mod p(x), x^174080 mod p(x) */ .octa 0x00000000f50c0000000000009edf0000 /* x^173120 mod p(x), x^173056 mod p(x) */ .octa 0x00000000714f00000000000004cd0000 /* x^172096 mod p(x), x^172032 mod p(x) */ .octa 0x00000000c161000000000000541b0000 /* x^171072 mod p(x), x^171008 mod p(x) */ .octa 0x0000000021c8000000000000e2700000 /* x^170048 mod p(x), x^169984 mod p(x) */ .octa 0x00000000b93d00000000000009a60000 /* x^169024 mod p(x), x^168960 mod p(x) */ .octa 0x00000000fbcf000000000000761c0000 /* x^168000 mod p(x), x^167936 mod p(x) */ .octa 0x0000000026350000000000009db30000 /* x^166976 mod p(x), x^166912 mod p(x) */ .octa 0x00000000b64f0000000000003e9f0000 /* x^165952 mod p(x), x^165888 mod p(x) */ .octa 0x00000000bd0e00000000000078590000 /* x^164928 mod p(x), x^164864 mod p(x) */ .octa 0x00000000d9360000000000008bc80000 /* x^163904 mod p(x), x^163840 mod p(x) */ .octa 0x000000002f140000000000008c9f0000 /* x^162880 mod p(x), x^162816 mod p(x) */ .octa 0x000000006a270000000000006af70000 /* x^161856 mod p(x), x^161792 mod p(x) */ .octa 0x000000006685000000000000e5210000 /* x^160832 mod p(x), x^160768 mod p(x) */ .octa 0x0000000062da00000000000008290000 /* x^159808 mod p(x), x^159744 mod p(x) */ .octa 0x00000000bb4b000000000000e4d00000 /* x^158784 mod p(x), x^158720 mod p(x) */ .octa 0x00000000d2490000000000004ae10000 /* x^157760 mod p(x), x^157696 mod p(x) */ .octa 0x00000000c85b00000000000000e70000 /* x^156736 mod p(x), x^156672 mod p(x) */ .octa 0x00000000c37a00000000000015650000 /* x^155712 mod p(x), x^155648 mod p(x) */ .octa 0x0000000018530000000000001c2f0000 /* x^154688 mod p(x), x^154624 mod p(x) */ .octa 0x00000000b46600000000000037bd0000 /* x^153664 mod p(x), x^153600 mod p(x) */ .octa 0x00000000439b00000000000012190000 /* x^152640 mod p(x), x^152576 mod p(x) */ .octa 0x00000000b1260000000000005ece0000 /* x^151616 mod p(x), x^151552 mod p(x) */ .octa 0x00000000d8110000000000002a5e0000 /* x^150592 mod p(x), x^150528 mod p(x) */ .octa 0x00000000099f00000000000052330000 /* x^149568 mod p(x), x^149504 mod p(x) */ .octa 0x00000000f9f9000000000000f9120000 /* x^148544 mod p(x), x^148480 mod p(x) */ .octa 0x000000005cc00000000000000ddc0000 /* x^147520 mod p(x), x^147456 mod p(x) */ .octa 0x00000000343b00000000000012200000 /* x^146496 mod p(x), x^146432 mod p(x) */ .octa 0x000000009222000000000000d12b0000 /* x^145472 mod p(x), x^145408 mod p(x) */ .octa 0x00000000d781000000000000eb2d0000 /* x^144448 mod p(x), x^144384 mod p(x) */ .octa 0x000000000bf400000000000058970000 /* x^143424 mod p(x), x^143360 mod p(x) */ .octa 0x00000000094200000000000013690000 /* x^142400 mod p(x), x^142336 mod p(x) */ .octa 0x00000000d55100000000000051950000 /* x^141376 mod p(x), x^141312 mod p(x) */ .octa 0x000000008f11000000000000954b0000 /* x^140352 mod p(x), x^140288 mod p(x) */ .octa 0x00000000140f000000000000b29e0000 /* x^139328 mod p(x), x^139264 mod p(x) */ .octa 0x00000000c6db000000000000db5d0000 /* x^138304 mod p(x), x^138240 mod p(x) */ .octa 
0x00000000715b000000000000dfaf0000 /* x^137280 mod p(x), x^137216 mod p(x) */ .octa 0x000000000dea000000000000e3b60000 /* x^136256 mod p(x), x^136192 mod p(x) */ .octa 0x000000006f94000000000000ddaf0000 /* x^135232 mod p(x), x^135168 mod p(x) */ .octa 0x0000000024e1000000000000e4f70000 /* x^134208 mod p(x), x^134144 mod p(x) */ .octa 0x000000008810000000000000aa110000 /* x^133184 mod p(x), x^133120 mod p(x) */ .octa 0x0000000030c2000000000000a8e60000 /* x^132160 mod p(x), x^132096 mod p(x) */ .octa 0x00000000e6d0000000000000ccf30000 /* x^131136 mod p(x), x^131072 mod p(x) */ .octa 0x000000004da000000000000079bf0000 /* x^130112 mod p(x), x^130048 mod p(x) */ .octa 0x000000007759000000000000b3a30000 /* x^129088 mod p(x), x^129024 mod p(x) */ .octa 0x00000000597400000000000028790000 /* x^128064 mod p(x), x^128000 mod p(x) */ .octa 0x000000007acd000000000000b5820000 /* x^127040 mod p(x), x^126976 mod p(x) */ .octa 0x00000000e6e400000000000026ad0000 /* x^126016 mod p(x), x^125952 mod p(x) */ .octa 0x000000006d49000000000000985b0000 /* x^124992 mod p(x), x^124928 mod p(x) */ .octa 0x000000000f0800000000000011520000 /* x^123968 mod p(x), x^123904 mod p(x) */ .octa 0x000000002c7f000000000000846c0000 /* x^122944 mod p(x), x^122880 mod p(x) */ .octa 0x000000005ce7000000000000ae1d0000 /* x^121920 mod p(x), x^121856 mod p(x) */ .octa 0x00000000d4cb000000000000e21d0000 /* x^120896 mod p(x), x^120832 mod p(x) */ .octa 0x000000003a2300000000000019bb0000 /* x^119872 mod p(x), x^119808 mod p(x) */ .octa 0x000000000e1700000000000095290000 /* x^118848 mod p(x), x^118784 mod p(x) */ .octa 0x000000006e6400000000000050d20000 /* x^117824 mod p(x), x^117760 mod p(x) */ .octa 0x000000008d5c0000000000000cd10000 /* x^116800 mod p(x), x^116736 mod p(x) */ .octa 0x00000000ef310000000000007b570000 /* x^115776 mod p(x), x^115712 mod p(x) */ .octa 0x00000000645d00000000000053d60000 /* x^114752 mod p(x), x^114688 mod p(x) */ .octa 0x0000000018fc00000000000077510000 /* x^113728 mod p(x), x^113664 mod p(x) */ .octa 0x000000000cb3000000000000a7b70000 /* x^112704 mod p(x), x^112640 mod p(x) */ .octa 0x00000000991b000000000000d0780000 /* x^111680 mod p(x), x^111616 mod p(x) */ .octa 0x00000000845a000000000000be3c0000 /* x^110656 mod p(x), x^110592 mod p(x) */ .octa 0x00000000d3a9000000000000df020000 /* x^109632 mod p(x), x^109568 mod p(x) */ .octa 0x0000000017d7000000000000063e0000 /* x^108608 mod p(x), x^108544 mod p(x) */ .octa 0x000000007a860000000000008ab40000 /* x^107584 mod p(x), x^107520 mod p(x) */ .octa 0x00000000fd7c000000000000c7bd0000 /* x^106560 mod p(x), x^106496 mod p(x) */ .octa 0x00000000a56b000000000000efd60000 /* x^105536 mod p(x), x^105472 mod p(x) */ .octa 0x0000000010e400000000000071380000 /* x^104512 mod p(x), x^104448 mod p(x) */ .octa 0x00000000994500000000000004d30000 /* x^103488 mod p(x), x^103424 mod p(x) */ .octa 0x00000000b83c0000000000003b0e0000 /* x^102464 mod p(x), x^102400 mod p(x) */ .octa 0x00000000d6c10000000000008b020000 /* x^101440 mod p(x), x^101376 mod p(x) */ .octa 0x000000009efc000000000000da940000 /* x^100416 mod p(x), x^100352 mod p(x) */ .octa 0x000000005e87000000000000f9f70000 /* x^99392 mod p(x), x^99328 mod p(x) */ .octa 0x000000006c9b00000000000045e40000 /* x^98368 mod p(x), x^98304 mod p(x) */ .octa 0x00000000178a00000000000083940000 /* x^97344 mod p(x), x^97280 mod p(x) */ .octa 0x00000000f0c8000000000000f0a00000 /* x^96320 mod p(x), x^96256 mod p(x) */ .octa 0x00000000f699000000000000b74b0000 /* x^95296 mod p(x), x^95232 mod p(x) */ .octa 0x00000000316d000000000000c1cf0000 
/* x^94272 mod p(x), x^94208 mod p(x) */ .octa 0x00000000987e00000000000072680000 /* x^93248 mod p(x), x^93184 mod p(x) */ .octa 0x00000000acff000000000000e0ab0000 /* x^92224 mod p(x), x^92160 mod p(x) */ .octa 0x00000000a1f6000000000000c5a80000 /* x^91200 mod p(x), x^91136 mod p(x) */ .octa 0x0000000061bd000000000000cf690000 /* x^90176 mod p(x), x^90112 mod p(x) */ .octa 0x00000000c9f2000000000000cbcc0000 /* x^89152 mod p(x), x^89088 mod p(x) */ .octa 0x000000005a33000000000000de050000 /* x^88128 mod p(x), x^88064 mod p(x) */ .octa 0x00000000e416000000000000ccd70000 /* x^87104 mod p(x), x^87040 mod p(x) */ .octa 0x0000000058930000000000002f670000 /* x^86080 mod p(x), x^86016 mod p(x) */ .octa 0x00000000a9d3000000000000152f0000 /* x^85056 mod p(x), x^84992 mod p(x) */ .octa 0x00000000c114000000000000ecc20000 /* x^84032 mod p(x), x^83968 mod p(x) */ .octa 0x00000000b9270000000000007c890000 /* x^83008 mod p(x), x^82944 mod p(x) */ .octa 0x000000002e6000000000000006ee0000 /* x^81984 mod p(x), x^81920 mod p(x) */ .octa 0x00000000dfc600000000000009100000 /* x^80960 mod p(x), x^80896 mod p(x) */ .octa 0x000000004911000000000000ad4e0000 /* x^79936 mod p(x), x^79872 mod p(x) */ .octa 0x00000000ae1b000000000000b04d0000 /* x^78912 mod p(x), x^78848 mod p(x) */ .octa 0x0000000005fa000000000000e9900000 /* x^77888 mod p(x), x^77824 mod p(x) */ .octa 0x0000000004a1000000000000cc6f0000 /* x^76864 mod p(x), x^76800 mod p(x) */ .octa 0x00000000af73000000000000ed110000 /* x^75840 mod p(x), x^75776 mod p(x) */ .octa 0x0000000082530000000000008f7e0000 /* x^74816 mod p(x), x^74752 mod p(x) */ .octa 0x00000000cfdc000000000000594f0000 /* x^73792 mod p(x), x^73728 mod p(x) */ .octa 0x00000000a6b6000000000000a8750000 /* x^72768 mod p(x), x^72704 mod p(x) */ .octa 0x00000000fd76000000000000aa0c0000 /* x^71744 mod p(x), x^71680 mod p(x) */ .octa 0x0000000006f500000000000071db0000 /* x^70720 mod p(x), x^70656 mod p(x) */ .octa 0x0000000037ca000000000000ab0c0000 /* x^69696 mod p(x), x^69632 mod p(x) */ .octa 0x00000000d7ab000000000000b7a00000 /* x^68672 mod p(x), x^68608 mod p(x) */ .octa 0x00000000440800000000000090d30000 /* x^67648 mod p(x), x^67584 mod p(x) */ .octa 0x00000000186100000000000054730000 /* x^66624 mod p(x), x^66560 mod p(x) */ .octa 0x000000007368000000000000a3a20000 /* x^65600 mod p(x), x^65536 mod p(x) */ .octa 0x0000000026d0000000000000f9040000 /* x^64576 mod p(x), x^64512 mod p(x) */ .octa 0x00000000fe770000000000009c0a0000 /* x^63552 mod p(x), x^63488 mod p(x) */ .octa 0x000000002cba000000000000d1e70000 /* x^62528 mod p(x), x^62464 mod p(x) */ .octa 0x00000000f8bd0000000000005ac10000 /* x^61504 mod p(x), x^61440 mod p(x) */ .octa 0x000000007372000000000000d68d0000 /* x^60480 mod p(x), x^60416 mod p(x) */ .octa 0x00000000f37f00000000000089f60000 /* x^59456 mod p(x), x^59392 mod p(x) */ .octa 0x00000000078400000000000008a90000 /* x^58432 mod p(x), x^58368 mod p(x) */ .octa 0x00000000d3e400000000000042360000 /* x^57408 mod p(x), x^57344 mod p(x) */ .octa 0x00000000eba800000000000092d50000 /* x^56384 mod p(x), x^56320 mod p(x) */ .octa 0x00000000afbe000000000000b4d50000 /* x^55360 mod p(x), x^55296 mod p(x) */ .octa 0x00000000d8ca000000000000c9060000 /* x^54336 mod p(x), x^54272 mod p(x) */ .octa 0x00000000c2d00000000000008f4f0000 /* x^53312 mod p(x), x^53248 mod p(x) */ .octa 0x00000000373200000000000028690000 /* x^52288 mod p(x), x^52224 mod p(x) */ .octa 0x0000000046ae000000000000c3b30000 /* x^51264 mod p(x), x^51200 mod p(x) */ .octa 0x00000000b243000000000000f8700000 /* x^50240 mod p(x), x^50176 
mod p(x) */ .octa 0x00000000f7f500000000000029eb0000 /* x^49216 mod p(x), x^49152 mod p(x) */ .octa 0x000000000c7e000000000000fe730000 /* x^48192 mod p(x), x^48128 mod p(x) */ .octa 0x00000000c38200000000000096000000 /* x^47168 mod p(x), x^47104 mod p(x) */ .octa 0x000000008956000000000000683c0000 /* x^46144 mod p(x), x^46080 mod p(x) */ .octa 0x00000000422d0000000000005f1e0000 /* x^45120 mod p(x), x^45056 mod p(x) */ .octa 0x00000000ac0f0000000000006f810000 /* x^44096 mod p(x), x^44032 mod p(x) */ .octa 0x00000000ce30000000000000031f0000 /* x^43072 mod p(x), x^43008 mod p(x) */ .octa 0x000000003d43000000000000455a0000 /* x^42048 mod p(x), x^41984 mod p(x) */ .octa 0x000000007ebe000000000000a6050000 /* x^41024 mod p(x), x^40960 mod p(x) */ .octa 0x00000000976e00000000000077eb0000 /* x^40000 mod p(x), x^39936 mod p(x) */ .octa 0x000000000872000000000000389c0000 /* x^38976 mod p(x), x^38912 mod p(x) */ .octa 0x000000008979000000000000c7b20000 /* x^37952 mod p(x), x^37888 mod p(x) */ .octa 0x000000005c1e0000000000001d870000 /* x^36928 mod p(x), x^36864 mod p(x) */ .octa 0x00000000aebb00000000000045810000 /* x^35904 mod p(x), x^35840 mod p(x) */ .octa 0x000000004f7e0000000000006d4a0000 /* x^34880 mod p(x), x^34816 mod p(x) */ .octa 0x00000000ea98000000000000b9200000 /* x^33856 mod p(x), x^33792 mod p(x) */ .octa 0x00000000f39600000000000022f20000 /* x^32832 mod p(x), x^32768 mod p(x) */ .octa 0x000000000bc500000000000041ca0000 /* x^31808 mod p(x), x^31744 mod p(x) */ .octa 0x00000000786400000000000078500000 /* x^30784 mod p(x), x^30720 mod p(x) */ .octa 0x00000000be970000000000009e7e0000 /* x^29760 mod p(x), x^29696 mod p(x) */ .octa 0x00000000dd6d000000000000a53c0000 /* x^28736 mod p(x), x^28672 mod p(x) */ .octa 0x000000004c3f00000000000039340000 /* x^27712 mod p(x), x^27648 mod p(x) */ .octa 0x0000000093a4000000000000b58e0000 /* x^26688 mod p(x), x^26624 mod p(x) */ .octa 0x0000000050fb00000000000062d40000 /* x^25664 mod p(x), x^25600 mod p(x) */ .octa 0x00000000f505000000000000a26f0000 /* x^24640 mod p(x), x^24576 mod p(x) */ .octa 0x0000000064f900000000000065e60000 /* x^23616 mod p(x), x^23552 mod p(x) */ .octa 0x00000000e8c2000000000000aad90000 /* x^22592 mod p(x), x^22528 mod p(x) */ .octa 0x00000000720b000000000000a3b00000 /* x^21568 mod p(x), x^21504 mod p(x) */ .octa 0x00000000e992000000000000d2680000 /* x^20544 mod p(x), x^20480 mod p(x) */ .octa 0x000000009132000000000000cf4c0000 /* x^19520 mod p(x), x^19456 mod p(x) */ .octa 0x00000000608a00000000000076610000 /* x^18496 mod p(x), x^18432 mod p(x) */ .octa 0x000000009948000000000000fb9f0000 /* x^17472 mod p(x), x^17408 mod p(x) */ .octa 0x00000000173000000000000003770000 /* x^16448 mod p(x), x^16384 mod p(x) */ .octa 0x000000006fe300000000000004880000 /* x^15424 mod p(x), x^15360 mod p(x) */ .octa 0x00000000e15300000000000056a70000 /* x^14400 mod p(x), x^14336 mod p(x) */ .octa 0x0000000092d60000000000009dfd0000 /* x^13376 mod p(x), x^13312 mod p(x) */ .octa 0x0000000002fd00000000000074c80000 /* x^12352 mod p(x), x^12288 mod p(x) */ .octa 0x00000000c78b000000000000a3ec0000 /* x^11328 mod p(x), x^11264 mod p(x) */ .octa 0x000000009262000000000000b3530000 /* x^10304 mod p(x), x^10240 mod p(x) */ .octa 0x0000000084f200000000000047bf0000 /* x^9280 mod p(x), x^9216 mod p(x) */ .octa 0x0000000067ee000000000000e97c0000 /* x^8256 mod p(x), x^8192 mod p(x) */ .octa 0x00000000535b00000000000091e10000 /* x^7232 mod p(x), x^7168 mod p(x) */ .octa 0x000000007ebb00000000000055060000 /* x^6208 mod p(x), x^6144 mod p(x) */ .octa 
0x00000000c6a1000000000000fd360000 /* x^5184 mod p(x), x^5120 mod p(x) */ .octa 0x000000001be500000000000055860000 /* x^4160 mod p(x), x^4096 mod p(x) */ .octa 0x00000000ae0e0000000000005bd00000 /* x^3136 mod p(x), x^3072 mod p(x) */ .octa 0x0000000022040000000000008db20000 /* x^2112 mod p(x), x^2048 mod p(x) */ .octa 0x00000000c9eb000000000000efe20000 /* x^1088 mod p(x), x^1024 mod p(x) */ .octa 0x0000000039b400000000000051d10000 .short_constants: /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */ /* x^2048 mod p(x), x^2016 mod p(x), x^1984 mod p(x), x^1952 mod p(x) */ .octa 0xefe20000dccf00009440000033590000 /* x^1920 mod p(x), x^1888 mod p(x), x^1856 mod p(x), x^1824 mod p(x) */ .octa 0xee6300002f3f000062180000e0ed0000 /* x^1792 mod p(x), x^1760 mod p(x), x^1728 mod p(x), x^1696 mod p(x) */ .octa 0xcf5f000017ef0000ccbe000023d30000 /* x^1664 mod p(x), x^1632 mod p(x), x^1600 mod p(x), x^1568 mod p(x) */ .octa 0x6d0c0000a30e00000920000042630000 /* x^1536 mod p(x), x^1504 mod p(x), x^1472 mod p(x), x^1440 mod p(x) */ .octa 0x21d30000932b0000a7a00000efcc0000 /* x^1408 mod p(x), x^1376 mod p(x), x^1344 mod p(x), x^1312 mod p(x) */ .octa 0x10be00000b310000666f00000d1c0000 /* x^1280 mod p(x), x^1248 mod p(x), x^1216 mod p(x), x^1184 mod p(x) */ .octa 0x1f240000ce9e0000caad0000589e0000 /* x^1152 mod p(x), x^1120 mod p(x), x^1088 mod p(x), x^1056 mod p(x) */ .octa 0x29610000d02b000039b400007cf50000 /* x^1024 mod p(x), x^992 mod p(x), x^960 mod p(x), x^928 mod p(x) */ .octa 0x51d100009d9d00003c0e0000bfd60000 /* x^896 mod p(x), x^864 mod p(x), x^832 mod p(x), x^800 mod p(x) */ .octa 0xda390000ceae000013830000713c0000 /* x^768 mod p(x), x^736 mod p(x), x^704 mod p(x), x^672 mod p(x) */ .octa 0xb67800001e16000085c0000080a60000 /* x^640 mod p(x), x^608 mod p(x), x^576 mod p(x), x^544 mod p(x) */ .octa 0x0db40000f7f90000371d0000e6580000 /* x^512 mod p(x), x^480 mod p(x), x^448 mod p(x), x^416 mod p(x) */ .octa 0x87e70000044c0000aadb0000a4970000 /* x^384 mod p(x), x^352 mod p(x), x^320 mod p(x), x^288 mod p(x) */ .octa 0x1f990000ad180000d8b30000e7b50000 /* x^256 mod p(x), x^224 mod p(x), x^192 mod p(x), x^160 mod p(x) */ .octa 0xbe6c00006ee300004c1a000006df0000 /* x^128 mod p(x), x^96 mod p(x), x^64 mod p(x), x^32 mod p(x) */ .octa 0xfb0b00002d560000136800008bb70000 .barrett_constants: /* Barrett constant m - (4^32)/n */ .octa 0x000000000000000000000001f65a57f8 /* x^64 div p(x) */ /* Barrett constant n */ .octa 0x0000000000000000000000018bb70000 #define CRC_FUNCTION_NAME __crct10dif_vpmsum #include "crc32-vpmsum_core.S"
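The "x^N mod p(x)" entries above are precomputed folding constants for the CRC-T10DIF polynomial; the Barrett constant n at the end of the table, 0x18BB7 (stored shifted left 16 bits in the .octa), is p(x) itself. The remainders can be generated by repeated carry-less multiply-by-x with reduction, as in the minimal C sketch below. The exact lane shift applied before the values are emitted as .octa entries is a convention of the generator, so treat that part as an assumption; the helper name xn_mod_p is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Compute x^n mod p(x) over GF(2) for a polynomial of degree 'deg'
 * whose bit pattern (including the x^deg term) is 'poly'. For
 * CRC-T10DIF, poly = 0x18BB7 and deg = 16, matching the Barrett
 * constant n in the table above. */
static uint32_t xn_mod_p(unsigned int n, uint32_t poly, unsigned int deg)
{
        uint32_t r = 1;                      /* start from x^0 */

        while (n--) {
                r <<= 1;                     /* multiply by x */
                if (r & (1u << deg))         /* degree reached: reduce */
                        r ^= poly;
        }
        return r;
}

int main(void)
{
        /* e.g. the remainders paired in the last .octa of the main table */
        printf("x^1088 mod p(x) = 0x%04x\n", xn_mod_p(1088, 0x18BB7, 16));
        printf("x^1024 mod p(x) = 0x%04x\n", xn_mod_p(1024, 0x18BB7, 16));
        return 0;
}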
aixcc-public/challenge-001-exemplar-source
7,630
arch/powerpc/crypto/aes-spe-core.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Fast AES implementation for SPE instruction set (PPC) * * This code makes use of the SPE SIMD instruction set as defined in * http://cache.freescale.com/files/32bit/doc/ref_manual/SPEPIM.pdf * Implementation is based on optimization guide notes from * http://cache.freescale.com/files/32bit/doc/app_note/AN2665.pdf * * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> */ #include <asm/ppc_asm.h> #include "aes-spe-regs.h" #define EAD(in, bpos) \ rlwimi rT0,in,28-((bpos+3)%4)*8,20,27; #define DAD(in, bpos) \ rlwimi rT1,in,24-((bpos+3)%4)*8,24,31; #define LWH(out, off) \ evlwwsplat out,off(rT0); /* load word high */ #define LWL(out, off) \ lwz out,off(rT0); /* load word low */ #define LBZ(out, tab, off) \ lbz out,off(tab); /* load byte */ #define LAH(out, in, bpos, off) \ EAD(in, bpos) /* calc addr + load word high */ \ LWH(out, off) #define LAL(out, in, bpos, off) \ EAD(in, bpos) /* calc addr + load word low */ \ LWL(out, off) #define LAE(out, in, bpos) \ EAD(in, bpos) /* calc addr + load enc byte */ \ LBZ(out, rT0, 8) #define LBE(out) \ LBZ(out, rT0, 8) /* load enc byte */ #define LAD(out, in, bpos) \ DAD(in, bpos) /* calc addr + load dec byte */ \ LBZ(out, rT1, 0) #define LBD(out) \ LBZ(out, rT1, 0) /* * ppc_encrypt_block: The central encryption function for a single 16 bytes * block. It does no stack handling or register saving to support fast calls * via bl/blr. It expects that caller has pre-xored input data with first * 4 words of encryption key into rD0-rD3. Pointer/counter registers must * have also been set up before (rT0, rKP, CTR). Output is stored in rD0-rD3 * and rW0-rW3 and caller must execute a final xor on the output registers. * All working registers rD0-rD3 & rW0-rW7 are overwritten during processing. 
* */ _GLOBAL(ppc_encrypt_block) LAH(rW4, rD1, 2, 4) LAH(rW6, rD0, 3, 0) LAH(rW3, rD0, 1, 8) ppc_encrypt_block_loop: LAH(rW0, rD3, 0, 12) LAL(rW0, rD0, 0, 12) LAH(rW1, rD1, 0, 12) LAH(rW2, rD2, 1, 8) LAL(rW2, rD3, 1, 8) LAL(rW3, rD1, 1, 8) LAL(rW4, rD2, 2, 4) LAL(rW6, rD1, 3, 0) LAH(rW5, rD3, 2, 4) LAL(rW5, rD0, 2, 4) LAH(rW7, rD2, 3, 0) evldw rD1,16(rKP) EAD(rD3, 3) evxor rW2,rW2,rW4 LWL(rW7, 0) evxor rW2,rW2,rW6 EAD(rD2, 0) evxor rD1,rD1,rW2 LWL(rW1, 12) evxor rD1,rD1,rW0 evldw rD3,24(rKP) evmergehi rD0,rD0,rD1 EAD(rD1, 2) evxor rW3,rW3,rW5 LWH(rW4, 4) evxor rW3,rW3,rW7 EAD(rD0, 3) evxor rD3,rD3,rW3 LWH(rW6, 0) evxor rD3,rD3,rW1 EAD(rD0, 1) evmergehi rD2,rD2,rD3 LWH(rW3, 8) LAH(rW0, rD3, 0, 12) LAL(rW0, rD0, 0, 12) LAH(rW1, rD1, 0, 12) LAH(rW2, rD2, 1, 8) LAL(rW2, rD3, 1, 8) LAL(rW3, rD1, 1, 8) LAL(rW4, rD2, 2, 4) LAL(rW6, rD1, 3, 0) LAH(rW5, rD3, 2, 4) LAL(rW5, rD0, 2, 4) LAH(rW7, rD2, 3, 0) evldw rD1,32(rKP) EAD(rD3, 3) evxor rW2,rW2,rW4 LWL(rW7, 0) evxor rW2,rW2,rW6 EAD(rD2, 0) evxor rD1,rD1,rW2 LWL(rW1, 12) evxor rD1,rD1,rW0 evldw rD3,40(rKP) evmergehi rD0,rD0,rD1 EAD(rD1, 2) evxor rW3,rW3,rW5 LWH(rW4, 4) evxor rW3,rW3,rW7 EAD(rD0, 3) evxor rD3,rD3,rW3 LWH(rW6, 0) evxor rD3,rD3,rW1 EAD(rD0, 1) evmergehi rD2,rD2,rD3 LWH(rW3, 8) addi rKP,rKP,32 bdnz ppc_encrypt_block_loop LAH(rW0, rD3, 0, 12) LAL(rW0, rD0, 0, 12) LAH(rW1, rD1, 0, 12) LAH(rW2, rD2, 1, 8) LAL(rW2, rD3, 1, 8) LAL(rW3, rD1, 1, 8) LAL(rW4, rD2, 2, 4) LAH(rW5, rD3, 2, 4) LAL(rW6, rD1, 3, 0) LAL(rW5, rD0, 2, 4) LAH(rW7, rD2, 3, 0) evldw rD1,16(rKP) EAD(rD3, 3) evxor rW2,rW2,rW4 LWL(rW7, 0) evxor rW2,rW2,rW6 EAD(rD2, 0) evxor rD1,rD1,rW2 LWL(rW1, 12) evxor rD1,rD1,rW0 evldw rD3,24(rKP) evmergehi rD0,rD0,rD1 EAD(rD1, 0) evxor rW3,rW3,rW5 LBE(rW2) evxor rW3,rW3,rW7 EAD(rD0, 1) evxor rD3,rD3,rW3 LBE(rW6) evxor rD3,rD3,rW1 EAD(rD0, 0) evmergehi rD2,rD2,rD3 LBE(rW1) LAE(rW0, rD3, 0) LAE(rW1, rD0, 0) LAE(rW4, rD2, 1) LAE(rW5, rD3, 1) LAE(rW3, rD2, 0) LAE(rW7, rD1, 1) rlwimi rW0,rW4,8,16,23 rlwimi rW1,rW5,8,16,23 LAE(rW4, rD1, 2) LAE(rW5, rD2, 2) rlwimi rW2,rW6,8,16,23 rlwimi rW3,rW7,8,16,23 LAE(rW6, rD3, 2) LAE(rW7, rD0, 2) rlwimi rW0,rW4,16,8,15 rlwimi rW1,rW5,16,8,15 LAE(rW4, rD0, 3) LAE(rW5, rD1, 3) rlwimi rW2,rW6,16,8,15 lwz rD0,32(rKP) rlwimi rW3,rW7,16,8,15 lwz rD1,36(rKP) LAE(rW6, rD2, 3) LAE(rW7, rD3, 3) rlwimi rW0,rW4,24,0,7 lwz rD2,40(rKP) rlwimi rW1,rW5,24,0,7 lwz rD3,44(rKP) rlwimi rW2,rW6,24,0,7 rlwimi rW3,rW7,24,0,7 blr /* * ppc_decrypt_block: The central decryption function for a single 16 bytes * block. It does no stack handling or register saving to support fast calls * via bl/blr. It expects that caller has pre-xored input data with first * 4 words of encryption key into rD0-rD3. Pointer/counter registers must * have also been set up before (rT0, rKP, CTR). Output is stored in rD0-rD3 * and rW0-rW3 and caller must execute a final xor on the output registers. * All working registers rD0-rD3 & rW0-rW7 are overwritten during processing. 
* */ _GLOBAL(ppc_decrypt_block) LAH(rW0, rD1, 0, 12) LAH(rW6, rD0, 3, 0) LAH(rW3, rD0, 1, 8) ppc_decrypt_block_loop: LAH(rW1, rD3, 0, 12) LAL(rW0, rD2, 0, 12) LAH(rW2, rD2, 1, 8) LAL(rW2, rD3, 1, 8) LAH(rW4, rD3, 2, 4) LAL(rW4, rD0, 2, 4) LAL(rW6, rD1, 3, 0) LAH(rW5, rD1, 2, 4) LAH(rW7, rD2, 3, 0) LAL(rW7, rD3, 3, 0) LAL(rW3, rD1, 1, 8) evldw rD1,16(rKP) EAD(rD0, 0) evxor rW4,rW4,rW6 LWL(rW1, 12) evxor rW0,rW0,rW4 EAD(rD2, 2) evxor rW0,rW0,rW2 LWL(rW5, 4) evxor rD1,rD1,rW0 evldw rD3,24(rKP) evmergehi rD0,rD0,rD1 EAD(rD1, 0) evxor rW3,rW3,rW7 LWH(rW0, 12) evxor rW3,rW3,rW1 EAD(rD0, 3) evxor rD3,rD3,rW3 LWH(rW6, 0) evxor rD3,rD3,rW5 EAD(rD0, 1) evmergehi rD2,rD2,rD3 LWH(rW3, 8) LAH(rW1, rD3, 0, 12) LAL(rW0, rD2, 0, 12) LAH(rW2, rD2, 1, 8) LAL(rW2, rD3, 1, 8) LAH(rW4, rD3, 2, 4) LAL(rW4, rD0, 2, 4) LAL(rW6, rD1, 3, 0) LAH(rW5, rD1, 2, 4) LAH(rW7, rD2, 3, 0) LAL(rW7, rD3, 3, 0) LAL(rW3, rD1, 1, 8) evldw rD1,32(rKP) EAD(rD0, 0) evxor rW4,rW4,rW6 LWL(rW1, 12) evxor rW0,rW0,rW4 EAD(rD2, 2) evxor rW0,rW0,rW2 LWL(rW5, 4) evxor rD1,rD1,rW0 evldw rD3,40(rKP) evmergehi rD0,rD0,rD1 EAD(rD1, 0) evxor rW3,rW3,rW7 LWH(rW0, 12) evxor rW3,rW3,rW1 EAD(rD0, 3) evxor rD3,rD3,rW3 LWH(rW6, 0) evxor rD3,rD3,rW5 EAD(rD0, 1) evmergehi rD2,rD2,rD3 LWH(rW3, 8) addi rKP,rKP,32 bdnz ppc_decrypt_block_loop LAH(rW1, rD3, 0, 12) LAL(rW0, rD2, 0, 12) LAH(rW2, rD2, 1, 8) LAL(rW2, rD3, 1, 8) LAH(rW4, rD3, 2, 4) LAL(rW4, rD0, 2, 4) LAL(rW6, rD1, 3, 0) LAH(rW5, rD1, 2, 4) LAH(rW7, rD2, 3, 0) LAL(rW7, rD3, 3, 0) LAL(rW3, rD1, 1, 8) evldw rD1,16(rKP) EAD(rD0, 0) evxor rW4,rW4,rW6 LWL(rW1, 12) evxor rW0,rW0,rW4 EAD(rD2, 2) evxor rW0,rW0,rW2 LWL(rW5, 4) evxor rD1,rD1,rW0 evldw rD3,24(rKP) evmergehi rD0,rD0,rD1 DAD(rD1, 0) evxor rW3,rW3,rW7 LBD(rW0) evxor rW3,rW3,rW1 DAD(rD0, 1) evxor rD3,rD3,rW3 LBD(rW6) evxor rD3,rD3,rW5 DAD(rD0, 0) evmergehi rD2,rD2,rD3 LBD(rW3) LAD(rW2, rD3, 0) LAD(rW1, rD2, 0) LAD(rW4, rD2, 1) LAD(rW5, rD3, 1) LAD(rW7, rD1, 1) rlwimi rW0,rW4,8,16,23 rlwimi rW1,rW5,8,16,23 LAD(rW4, rD3, 2) LAD(rW5, rD0, 2) rlwimi rW2,rW6,8,16,23 rlwimi rW3,rW7,8,16,23 LAD(rW6, rD1, 2) LAD(rW7, rD2, 2) rlwimi rW0,rW4,16,8,15 rlwimi rW1,rW5,16,8,15 LAD(rW4, rD0, 3) LAD(rW5, rD1, 3) rlwimi rW2,rW6,16,8,15 lwz rD0,32(rKP) rlwimi rW3,rW7,16,8,15 lwz rD1,36(rKP) LAD(rW6, rD2, 3) LAD(rW7, rD3, 3) rlwimi rW0,rW4,24,0,7 lwz rD2,40(rKP) rlwimi rW1,rW5,24,0,7 lwz rD3,44(rKP) rlwimi rW2,rW6,24,0,7 rlwimi rW3,rW7,24,0,7 blr
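The EAD and DAD macros at the top of the file are the heart of these table lookups: each splices one byte of the AES state into the low bits of a table pointer with a single rlwimi, so no explicit shift-and-add address arithmetic is needed. Below is a scalar C model of the resulting addresses, assuming the 4 KB tables are aligned so that the pointer bits outside the inserted field are already correct; the function names are illustrative only.

#include <stdint.h>

/* EAD: insert byte 'bpos' of the state word at bits 4-11 of the
 * encryption-table pointer (16-byte stride entries). DAD: insert it
 * at bits 0-7 of the decryption-table pointer (1-byte entries). */
static inline uint32_t ead(uint32_t rT0, uint32_t in, int bpos)
{
        uint32_t byte = (in >> (8 * bpos)) & 0xff;

        return (rT0 & ~0xff0u) | (byte << 4);   /* rlwimi ...,20,27 */
}

static inline uint32_t dad(uint32_t rT1, uint32_t in, int bpos)
{
        uint32_t byte = (in >> (8 * bpos)) & 0xff;

        return (rT1 & ~0xffu) | byte;           /* rlwimi ...,24,31 */
}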
aixcc-public/challenge-001-exemplar-source
14,691
arch/powerpc/crypto/aes-spe-modes.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * AES modes (ECB/CBC/CTR/XTS) for PPC AES implementation * * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> */ #include <asm/ppc_asm.h> #include "aes-spe-regs.h" #ifdef __BIG_ENDIAN__ /* Macros for big endian builds */ #define LOAD_DATA(reg, off) \ lwz reg,off(rSP); /* load with offset */ #define SAVE_DATA(reg, off) \ stw reg,off(rDP); /* save with offset */ #define NEXT_BLOCK \ addi rSP,rSP,16; /* increment pointers per bloc */ \ addi rDP,rDP,16; #define LOAD_IV(reg, off) \ lwz reg,off(rIP); /* IV loading with offset */ #define SAVE_IV(reg, off) \ stw reg,off(rIP); /* IV saving with offset */ #define START_IV /* nothing to reset */ #define CBC_DEC 16 /* CBC decrement per block */ #define CTR_DEC 1 /* CTR decrement one byte */ #else /* Macros for little endian */ #define LOAD_DATA(reg, off) \ lwbrx reg,0,rSP; /* load reversed */ \ addi rSP,rSP,4; /* and increment pointer */ #define SAVE_DATA(reg, off) \ stwbrx reg,0,rDP; /* save reversed */ \ addi rDP,rDP,4; /* and increment pointer */ #define NEXT_BLOCK /* nothing todo */ #define LOAD_IV(reg, off) \ lwbrx reg,0,rIP; /* load reversed */ \ addi rIP,rIP,4; /* and increment pointer */ #define SAVE_IV(reg, off) \ stwbrx reg,0,rIP; /* load reversed */ \ addi rIP,rIP,4; /* and increment pointer */ #define START_IV \ subi rIP,rIP,16; /* must reset pointer */ #define CBC_DEC 32 /* 2 blocks because of incs */ #define CTR_DEC 17 /* 1 block because of incs */ #endif #define SAVE_0_REGS #define LOAD_0_REGS #define SAVE_4_REGS \ stw rI0,96(r1); /* save 32 bit registers */ \ stw rI1,100(r1); \ stw rI2,104(r1); \ stw rI3,108(r1); #define LOAD_4_REGS \ lwz rI0,96(r1); /* restore 32 bit registers */ \ lwz rI1,100(r1); \ lwz rI2,104(r1); \ lwz rI3,108(r1); #define SAVE_8_REGS \ SAVE_4_REGS \ stw rG0,112(r1); /* save 32 bit registers */ \ stw rG1,116(r1); \ stw rG2,120(r1); \ stw rG3,124(r1); #define LOAD_8_REGS \ LOAD_4_REGS \ lwz rG0,112(r1); /* restore 32 bit registers */ \ lwz rG1,116(r1); \ lwz rG2,120(r1); \ lwz rG3,124(r1); #define INITIALIZE_CRYPT(tab,nr32bitregs) \ mflr r0; \ stwu r1,-160(r1); /* create stack frame */ \ lis rT0,tab@h; /* en-/decryption table pointer */ \ stw r0,8(r1); /* save link register */ \ ori rT0,rT0,tab@l; \ evstdw r14,16(r1); \ mr rKS,rKP; \ evstdw r15,24(r1); /* We must save non volatile */ \ evstdw r16,32(r1); /* registers. 
Take the chance */ \ evstdw r17,40(r1); /* and save the SPE part too */ \ evstdw r18,48(r1); \ evstdw r19,56(r1); \ evstdw r20,64(r1); \ evstdw r21,72(r1); \ evstdw r22,80(r1); \ evstdw r23,88(r1); \ SAVE_##nr32bitregs##_REGS #define FINALIZE_CRYPT(nr32bitregs) \ lwz r0,8(r1); \ evldw r14,16(r1); /* restore SPE registers */ \ evldw r15,24(r1); \ evldw r16,32(r1); \ evldw r17,40(r1); \ evldw r18,48(r1); \ evldw r19,56(r1); \ evldw r20,64(r1); \ evldw r21,72(r1); \ evldw r22,80(r1); \ evldw r23,88(r1); \ LOAD_##nr32bitregs##_REGS \ mtlr r0; /* restore link register */ \ xor r0,r0,r0; \ stw r0,16(r1); /* delete sensitive data */ \ stw r0,24(r1); /* that we might have pushed */ \ stw r0,32(r1); /* from other context that runs */ \ stw r0,40(r1); /* the same code */ \ stw r0,48(r1); \ stw r0,56(r1); \ stw r0,64(r1); \ stw r0,72(r1); \ stw r0,80(r1); \ stw r0,88(r1); \ addi r1,r1,160; /* cleanup stack frame */ #define ENDIAN_SWAP(t0, t1, s0, s1) \ rotrwi t0,s0,8; /* swap endianness for 2 GPRs */ \ rotrwi t1,s1,8; \ rlwimi t0,s0,8,8,15; \ rlwimi t1,s1,8,8,15; \ rlwimi t0,s0,8,24,31; \ rlwimi t1,s1,8,24,31; #define GF128_MUL(d0, d1, d2, d3, t0) \ li t0,0x87; /* multiplication in GF128 */ \ cmpwi d3,-1; \ iselgt t0,0,t0; \ rlwimi d3,d2,0,0,0; /* propagate "carry" bits */ \ rotlwi d3,d3,1; \ rlwimi d2,d1,0,0,0; \ rotlwi d2,d2,1; \ rlwimi d1,d0,0,0,0; \ slwi d0,d0,1; /* shift left 128 bit */ \ rotlwi d1,d1,1; \ xor d0,d0,t0; #define START_KEY(d0, d1, d2, d3) \ lwz rW0,0(rKP); \ mtctr rRR; \ lwz rW1,4(rKP); \ lwz rW2,8(rKP); \ lwz rW3,12(rKP); \ xor rD0,d0,rW0; \ xor rD1,d1,rW1; \ xor rD2,d2,rW2; \ xor rD3,d3,rW3; /* * ppc_encrypt_aes(u8 *out, const u8 *in, u32 *key_enc, * u32 rounds) * * called from glue layer to encrypt a single 16 byte block * round values are AES128 = 4, AES192 = 5, AES256 = 6 * */ _GLOBAL(ppc_encrypt_aes) INITIALIZE_CRYPT(PPC_AES_4K_ENCTAB, 0) LOAD_DATA(rD0, 0) LOAD_DATA(rD1, 4) LOAD_DATA(rD2, 8) LOAD_DATA(rD3, 12) START_KEY(rD0, rD1, rD2, rD3) bl ppc_encrypt_block xor rD0,rD0,rW0 SAVE_DATA(rD0, 0) xor rD1,rD1,rW1 SAVE_DATA(rD1, 4) xor rD2,rD2,rW2 SAVE_DATA(rD2, 8) xor rD3,rD3,rW3 SAVE_DATA(rD3, 12) FINALIZE_CRYPT(0) blr /* * ppc_decrypt_aes(u8 *out, const u8 *in, u32 *key_dec, * u32 rounds) * * called from glue layer to decrypt a single 16 byte block * round values are AES128 = 4, AES192 = 5, AES256 = 6 * */ _GLOBAL(ppc_decrypt_aes) INITIALIZE_CRYPT(PPC_AES_4K_DECTAB,0) LOAD_DATA(rD0, 0) addi rT1,rT0,4096 LOAD_DATA(rD1, 4) LOAD_DATA(rD2, 8) LOAD_DATA(rD3, 12) START_KEY(rD0, rD1, rD2, rD3) bl ppc_decrypt_block xor rD0,rD0,rW0 SAVE_DATA(rD0, 0) xor rD1,rD1,rW1 SAVE_DATA(rD1, 4) xor rD2,rD2,rW2 SAVE_DATA(rD2, 8) xor rD3,rD3,rW3 SAVE_DATA(rD3, 12) FINALIZE_CRYPT(0) blr /* * ppc_encrypt_ecb(u8 *out, const u8 *in, u32 *key_enc, * u32 rounds, u32 bytes); * * called from glue layer to encrypt multiple blocks via ECB * Bytes must be larger or equal 16 and only whole blocks are * processed. 
round values are AES128 = 4, AES192 = 5 and * AES256 = 6 * */ _GLOBAL(ppc_encrypt_ecb) INITIALIZE_CRYPT(PPC_AES_4K_ENCTAB, 0) ppc_encrypt_ecb_loop: LOAD_DATA(rD0, 0) mr rKP,rKS LOAD_DATA(rD1, 4) subi rLN,rLN,16 LOAD_DATA(rD2, 8) cmpwi rLN,15 LOAD_DATA(rD3, 12) START_KEY(rD0, rD1, rD2, rD3) bl ppc_encrypt_block xor rD0,rD0,rW0 SAVE_DATA(rD0, 0) xor rD1,rD1,rW1 SAVE_DATA(rD1, 4) xor rD2,rD2,rW2 SAVE_DATA(rD2, 8) xor rD3,rD3,rW3 SAVE_DATA(rD3, 12) NEXT_BLOCK bt gt,ppc_encrypt_ecb_loop FINALIZE_CRYPT(0) blr /* * ppc_decrypt_ecb(u8 *out, const u8 *in, u32 *key_dec, * u32 rounds, u32 bytes); * * called from glue layer to decrypt multiple blocks via ECB * Bytes must be larger or equal 16 and only whole blocks are * processed. round values are AES128 = 4, AES192 = 5 and * AES256 = 6 * */ _GLOBAL(ppc_decrypt_ecb) INITIALIZE_CRYPT(PPC_AES_4K_DECTAB, 0) addi rT1,rT0,4096 ppc_decrypt_ecb_loop: LOAD_DATA(rD0, 0) mr rKP,rKS LOAD_DATA(rD1, 4) subi rLN,rLN,16 LOAD_DATA(rD2, 8) cmpwi rLN,15 LOAD_DATA(rD3, 12) START_KEY(rD0, rD1, rD2, rD3) bl ppc_decrypt_block xor rD0,rD0,rW0 SAVE_DATA(rD0, 0) xor rD1,rD1,rW1 SAVE_DATA(rD1, 4) xor rD2,rD2,rW2 SAVE_DATA(rD2, 8) xor rD3,rD3,rW3 SAVE_DATA(rD3, 12) NEXT_BLOCK bt gt,ppc_decrypt_ecb_loop FINALIZE_CRYPT(0) blr /* * ppc_encrypt_cbc(u8 *out, const u8 *in, u32 *key_enc, * 32 rounds, u32 bytes, u8 *iv); * * called from glue layer to encrypt multiple blocks via CBC * Bytes must be larger or equal 16 and only whole blocks are * processed. round values are AES128 = 4, AES192 = 5 and * AES256 = 6 * */ _GLOBAL(ppc_encrypt_cbc) INITIALIZE_CRYPT(PPC_AES_4K_ENCTAB, 4) LOAD_IV(rI0, 0) LOAD_IV(rI1, 4) LOAD_IV(rI2, 8) LOAD_IV(rI3, 12) ppc_encrypt_cbc_loop: LOAD_DATA(rD0, 0) mr rKP,rKS LOAD_DATA(rD1, 4) subi rLN,rLN,16 LOAD_DATA(rD2, 8) cmpwi rLN,15 LOAD_DATA(rD3, 12) xor rD0,rD0,rI0 xor rD1,rD1,rI1 xor rD2,rD2,rI2 xor rD3,rD3,rI3 START_KEY(rD0, rD1, rD2, rD3) bl ppc_encrypt_block xor rI0,rD0,rW0 SAVE_DATA(rI0, 0) xor rI1,rD1,rW1 SAVE_DATA(rI1, 4) xor rI2,rD2,rW2 SAVE_DATA(rI2, 8) xor rI3,rD3,rW3 SAVE_DATA(rI3, 12) NEXT_BLOCK bt gt,ppc_encrypt_cbc_loop START_IV SAVE_IV(rI0, 0) SAVE_IV(rI1, 4) SAVE_IV(rI2, 8) SAVE_IV(rI3, 12) FINALIZE_CRYPT(4) blr /* * ppc_decrypt_cbc(u8 *out, const u8 *in, u32 *key_dec, * u32 rounds, u32 bytes, u8 *iv); * * called from glue layer to decrypt multiple blocks via CBC * round values are AES128 = 4, AES192 = 5, AES256 = 6 * */ _GLOBAL(ppc_decrypt_cbc) INITIALIZE_CRYPT(PPC_AES_4K_DECTAB, 4) li rT1,15 LOAD_IV(rI0, 0) andc rLN,rLN,rT1 LOAD_IV(rI1, 4) subi rLN,rLN,16 LOAD_IV(rI2, 8) add rSP,rSP,rLN /* reverse processing */ LOAD_IV(rI3, 12) add rDP,rDP,rLN LOAD_DATA(rD0, 0) addi rT1,rT0,4096 LOAD_DATA(rD1, 4) LOAD_DATA(rD2, 8) LOAD_DATA(rD3, 12) START_IV SAVE_IV(rD0, 0) SAVE_IV(rD1, 4) SAVE_IV(rD2, 8) cmpwi rLN,16 SAVE_IV(rD3, 12) bt lt,ppc_decrypt_cbc_end ppc_decrypt_cbc_loop: mr rKP,rKS START_KEY(rD0, rD1, rD2, rD3) bl ppc_decrypt_block subi rLN,rLN,16 subi rSP,rSP,CBC_DEC xor rW0,rD0,rW0 LOAD_DATA(rD0, 0) xor rW1,rD1,rW1 LOAD_DATA(rD1, 4) xor rW2,rD2,rW2 LOAD_DATA(rD2, 8) xor rW3,rD3,rW3 LOAD_DATA(rD3, 12) xor rW0,rW0,rD0 SAVE_DATA(rW0, 0) xor rW1,rW1,rD1 SAVE_DATA(rW1, 4) xor rW2,rW2,rD2 SAVE_DATA(rW2, 8) xor rW3,rW3,rD3 SAVE_DATA(rW3, 12) cmpwi rLN,15 subi rDP,rDP,CBC_DEC bt gt,ppc_decrypt_cbc_loop ppc_decrypt_cbc_end: mr rKP,rKS START_KEY(rD0, rD1, rD2, rD3) bl ppc_decrypt_block xor rW0,rW0,rD0 xor rW1,rW1,rD1 xor rW2,rW2,rD2 xor rW3,rW3,rD3 xor rW0,rW0,rI0 /* decrypt with initial IV */ SAVE_DATA(rW0, 0) xor rW1,rW1,rI1 SAVE_DATA(rW1, 4) xor 
rW2,rW2,rI2 SAVE_DATA(rW2, 8) xor rW3,rW3,rI3 SAVE_DATA(rW3, 12) FINALIZE_CRYPT(4) blr /* * ppc_crypt_ctr(u8 *out, const u8 *in, u32 *key_enc, * u32 rounds, u32 bytes, u8 *iv); * * called from glue layer to encrypt/decrypt multiple blocks * via CTR. Number of bytes does not need to be a multiple of * 16. Round values are AES128 = 4, AES192 = 5, AES256 = 6 * */ _GLOBAL(ppc_crypt_ctr) INITIALIZE_CRYPT(PPC_AES_4K_ENCTAB, 4) LOAD_IV(rI0, 0) LOAD_IV(rI1, 4) LOAD_IV(rI2, 8) cmpwi rLN,16 LOAD_IV(rI3, 12) START_IV bt lt,ppc_crypt_ctr_partial ppc_crypt_ctr_loop: mr rKP,rKS START_KEY(rI0, rI1, rI2, rI3) bl ppc_encrypt_block xor rW0,rD0,rW0 xor rW1,rD1,rW1 xor rW2,rD2,rW2 xor rW3,rD3,rW3 LOAD_DATA(rD0, 0) subi rLN,rLN,16 LOAD_DATA(rD1, 4) LOAD_DATA(rD2, 8) LOAD_DATA(rD3, 12) xor rD0,rD0,rW0 SAVE_DATA(rD0, 0) xor rD1,rD1,rW1 SAVE_DATA(rD1, 4) xor rD2,rD2,rW2 SAVE_DATA(rD2, 8) xor rD3,rD3,rW3 SAVE_DATA(rD3, 12) addic rI3,rI3,1 /* increase counter */ addze rI2,rI2 addze rI1,rI1 addze rI0,rI0 NEXT_BLOCK cmpwi rLN,15 bt gt,ppc_crypt_ctr_loop ppc_crypt_ctr_partial: cmpwi rLN,0 bt eq,ppc_crypt_ctr_end mr rKP,rKS START_KEY(rI0, rI1, rI2, rI3) bl ppc_encrypt_block xor rW0,rD0,rW0 SAVE_IV(rW0, 0) xor rW1,rD1,rW1 SAVE_IV(rW1, 4) xor rW2,rD2,rW2 SAVE_IV(rW2, 8) xor rW3,rD3,rW3 SAVE_IV(rW3, 12) mtctr rLN subi rIP,rIP,CTR_DEC subi rSP,rSP,1 subi rDP,rDP,1 ppc_crypt_ctr_xorbyte: lbzu rW4,1(rIP) /* bytewise xor for partial block */ lbzu rW5,1(rSP) xor rW4,rW4,rW5 stbu rW4,1(rDP) bdnz ppc_crypt_ctr_xorbyte subf rIP,rLN,rIP addi rIP,rIP,1 addic rI3,rI3,1 addze rI2,rI2 addze rI1,rI1 addze rI0,rI0 ppc_crypt_ctr_end: SAVE_IV(rI0, 0) SAVE_IV(rI1, 4) SAVE_IV(rI2, 8) SAVE_IV(rI3, 12) FINALIZE_CRYPT(4) blr /* * ppc_encrypt_xts(u8 *out, const u8 *in, u32 *key_enc, * u32 rounds, u32 bytes, u8 *iv, u32 *key_twk); * * called from glue layer to encrypt multiple blocks via XTS * If key_twk is given, the initial IV encryption will be * processed too. Round values are AES128 = 4, AES192 = 5, * AES256 = 6 * */ _GLOBAL(ppc_encrypt_xts) INITIALIZE_CRYPT(PPC_AES_4K_ENCTAB, 8) LOAD_IV(rI0, 0) LOAD_IV(rI1, 4) LOAD_IV(rI2, 8) cmpwi rKT,0 LOAD_IV(rI3, 12) bt eq,ppc_encrypt_xts_notweak mr rKP,rKT START_KEY(rI0, rI1, rI2, rI3) bl ppc_encrypt_block xor rI0,rD0,rW0 xor rI1,rD1,rW1 xor rI2,rD2,rW2 xor rI3,rD3,rW3 ppc_encrypt_xts_notweak: ENDIAN_SWAP(rG0, rG1, rI0, rI1) ENDIAN_SWAP(rG2, rG3, rI2, rI3) ppc_encrypt_xts_loop: LOAD_DATA(rD0, 0) mr rKP,rKS LOAD_DATA(rD1, 4) subi rLN,rLN,16 LOAD_DATA(rD2, 8) LOAD_DATA(rD3, 12) xor rD0,rD0,rI0 xor rD1,rD1,rI1 xor rD2,rD2,rI2 xor rD3,rD3,rI3 START_KEY(rD0, rD1, rD2, rD3) bl ppc_encrypt_block xor rD0,rD0,rW0 xor rD1,rD1,rW1 xor rD2,rD2,rW2 xor rD3,rD3,rW3 xor rD0,rD0,rI0 SAVE_DATA(rD0, 0) xor rD1,rD1,rI1 SAVE_DATA(rD1, 4) xor rD2,rD2,rI2 SAVE_DATA(rD2, 8) xor rD3,rD3,rI3 SAVE_DATA(rD3, 12) GF128_MUL(rG0, rG1, rG2, rG3, rW0) ENDIAN_SWAP(rI0, rI1, rG0, rG1) ENDIAN_SWAP(rI2, rI3, rG2, rG3) cmpwi rLN,0 NEXT_BLOCK bt gt,ppc_encrypt_xts_loop START_IV SAVE_IV(rI0, 0) SAVE_IV(rI1, 4) SAVE_IV(rI2, 8) SAVE_IV(rI3, 12) FINALIZE_CRYPT(8) blr /* * ppc_decrypt_xts(u8 *out, const u8 *in, u32 *key_dec, * u32 rounds, u32 blocks, u8 *iv, u32 *key_twk); * * called from glue layer to decrypt multiple blocks via XTS * If key_twk is given, the initial IV encryption will be * processed too. 
Round values are AES128 = 4, AES192 = 5, * AES256 = 6 * */ _GLOBAL(ppc_decrypt_xts) INITIALIZE_CRYPT(PPC_AES_4K_DECTAB, 8) LOAD_IV(rI0, 0) addi rT1,rT0,4096 LOAD_IV(rI1, 4) LOAD_IV(rI2, 8) cmpwi rKT,0 LOAD_IV(rI3, 12) bt eq,ppc_decrypt_xts_notweak subi rT0,rT0,4096 mr rKP,rKT START_KEY(rI0, rI1, rI2, rI3) bl ppc_encrypt_block xor rI0,rD0,rW0 xor rI1,rD1,rW1 xor rI2,rD2,rW2 xor rI3,rD3,rW3 addi rT0,rT0,4096 ppc_decrypt_xts_notweak: ENDIAN_SWAP(rG0, rG1, rI0, rI1) ENDIAN_SWAP(rG2, rG3, rI2, rI3) ppc_decrypt_xts_loop: LOAD_DATA(rD0, 0) mr rKP,rKS LOAD_DATA(rD1, 4) subi rLN,rLN,16 LOAD_DATA(rD2, 8) LOAD_DATA(rD3, 12) xor rD0,rD0,rI0 xor rD1,rD1,rI1 xor rD2,rD2,rI2 xor rD3,rD3,rI3 START_KEY(rD0, rD1, rD2, rD3) bl ppc_decrypt_block xor rD0,rD0,rW0 xor rD1,rD1,rW1 xor rD2,rD2,rW2 xor rD3,rD3,rW3 xor rD0,rD0,rI0 SAVE_DATA(rD0, 0) xor rD1,rD1,rI1 SAVE_DATA(rD1, 4) xor rD2,rD2,rI2 SAVE_DATA(rD2, 8) xor rD3,rD3,rI3 SAVE_DATA(rD3, 12) GF128_MUL(rG0, rG1, rG2, rG3, rW0) ENDIAN_SWAP(rI0, rI1, rG0, rG1) ENDIAN_SWAP(rI2, rI3, rG2, rG3) cmpwi rLN,0 NEXT_BLOCK bt gt,ppc_decrypt_xts_loop START_IV SAVE_IV(rI0, 0) SAVE_IV(rI1, 4) SAVE_IV(rI2, 8) SAVE_IV(rI3, 12) FINALIZE_CRYPT(8) blr
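The GF128_MUL macro above implements the XTS tweak update: multiplication by x in GF(2^128) with reduction polynomial x^128 + x^7 + x^2 + x + 1, i.e. a 128-bit left shift with 0x87 folded into the low byte when the top bit falls out. A C model over four 32-bit words follows, with d3 taken as the most significant word as the macro's carry propagation implies; the function name is illustrative.

#include <stdint.h>

/* Multiply the 128-bit tweak (d3 = most significant word, d0 = least)
 * by x in GF(2^128), mirroring GF128_MUL. */
static void gf128_mul_x(uint32_t *d0, uint32_t *d1, uint32_t *d2,
                        uint32_t *d3)
{
        uint32_t fb = (*d3 & 0x80000000u) ? 0x87u : 0u; /* cmpwi/iselgt */

        *d3 = (*d3 << 1) | (*d2 >> 31);  /* shift left 128 bit,  */
        *d2 = (*d2 << 1) | (*d1 >> 31);  /* propagating "carry"  */
        *d1 = (*d1 << 1) | (*d0 >> 31);  /* bits between words   */
        *d0 = (*d0 << 1) ^ fb;           /* fold in the feedback */
}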
aixcc-public/challenge-001-exemplar-source
1,718
arch/powerpc/lib/div64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Divide a 64-bit unsigned number by a 32-bit unsigned number. * This routine assumes that the top 32 bits of the dividend are * non-zero to start with. * On entry, r3 points to the dividend, which gets overwritten with * the 64-bit quotient, and r4 contains the divisor. * On exit, r3 contains the remainder. * * Copyright (C) 2002 Paul Mackerras, IBM Corp. */ #include <asm/ppc_asm.h> #include <asm/processor.h> _GLOBAL(__div64_32) lwz r5,0(r3) # get the dividend into r5/r6 lwz r6,4(r3) cmplw r5,r4 li r7,0 li r8,0 blt 1f divwu r7,r5,r4 # if dividend.hi >= divisor, mullw r0,r7,r4 # quotient.hi = dividend.hi / divisor subf. r5,r0,r5 # dividend.hi %= divisor beq 3f 1: mr r11,r5 # here dividend.hi != 0 andis. r0,r5,0xc000 bne 2f cntlzw r0,r5 # we are shifting the dividend right li r10,-1 # to make it < 2^32, and shifting srw r10,r10,r0 # the divisor right the same amount, addc r9,r4,r10 # rounding up (so the estimate cannot andc r11,r6,r10 # ever be too large, only too small) andc r9,r9,r10 addze r9,r9 or r11,r5,r11 rotlw r9,r9,r0 rotlw r11,r11,r0 divwu r11,r11,r9 # then we divide the shifted quantities 2: mullw r10,r11,r4 # to get an estimate of the quotient, mulhwu r9,r11,r4 # multiply the estimate by the divisor, subfc r6,r10,r6 # take the product from the dividend, add r8,r8,r11 # and add the estimate to the accumulated subfe. r5,r9,r5 # quotient bne 1b 3: cmplw r6,r4 blt 4f divwu r0,r6,r4 # perform the remaining 32-bit division mullw r10,r0,r4 # and get the remainder add r8,r8,r0 subf r6,r10,r6 4: stw r7,0(r3) # return the quotient in *r3 stw r8,4(r3) mr r3,r6 # return the remainder in r3 blr
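For reference, the calling convention described in the header comment can be captured in a few lines of C. This is a behavioural model to test against, not the shift-and-estimate algorithm itself; it assumes a nonzero divisor, and the function name is illustrative.

#include <stdint.h>

/* Divide *dividend by divisor: the quotient overwrites *dividend and
 * the remainder is returned, mirroring the r3/r4 interface above. */
static uint32_t div64_32_model(uint64_t *dividend, uint32_t divisor)
{
        uint64_t q = *dividend / divisor;
        uint32_t r = (uint32_t)(*dividend % divisor);

        *dividend = q;
        return r;
}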
aixcc-public/challenge-001-exemplar-source
3,507
arch/powerpc/lib/test_emulate_step_exec_instr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Non-emulated single-stepping support (currently limited to basic integer * computations) used to validate the instruction emulation infrastructure. * * Copyright (C) 2019 IBM Corporation */ #include <asm/asm-offsets.h> #include <asm/ppc_asm.h> #include <asm/code-patching-asm.h> #include <linux/errno.h> /* int exec_instr(struct pt_regs *regs) */ _GLOBAL(exec_instr) /* * Stack frame layout (INT_FRAME_SIZE bytes) * In-memory pt_regs (SP + STACK_FRAME_OVERHEAD) * Scratch space (SP + 8) * Back chain (SP + 0) */ /* * Allocate a new stack frame with enough space to hold the register * states in an in-memory pt_regs and also create the back chain to * the caller's stack frame. */ stdu r1, -INT_FRAME_SIZE(r1) /* * Save non-volatile GPRs on stack. This includes TOC pointer (GPR2) * and local variables (GPR14 to GPR31). The register for the pt_regs * parameter (GPR3) is saved additionally to ensure that the resulting * register state can still be saved even if GPR3 gets overwritten * when loading the initial register state for the test instruction. * The stack pointer (GPR1) and the thread pointer (GPR13) are not * saved as these should not be modified anyway. */ SAVE_GPRS(2, 3, r1) SAVE_NVGPRS(r1) /* * Save LR on stack to ensure that the return address is available * even if it gets overwritten by the test instruction. */ mflr r0 std r0, _LINK(r1) /* * Save CR on stack. For simplicity, the entire register is saved * even though only fields 2 to 4 are non-volatile. */ mfcr r0 std r0, _CCR(r1) /* * Load register state for the test instruction without touching the * critical non-volatile registers. The register state is passed as a * pointer to a pt_regs instance. */ subi r31, r3, GPR0 /* Load LR from pt_regs */ ld r0, _LINK(r31) mtlr r0 /* Load CR from pt_regs */ ld r0, _CCR(r31) mtcr r0 /* Load XER from pt_regs */ ld r0, _XER(r31) mtxer r0 /* Load GPRs from pt_regs */ REST_GPR(0, r31) REST_GPRS(2, 12, r31) REST_NVGPRS(r31) /* Placeholder for the test instruction */ .balign 64 1: nop nop patch_site 1b patch__exec_instr /* * Since GPR3 is overwritten, temporarily restore it back to its * original state, i.e. the pointer to pt_regs, to ensure that the * resulting register state can be saved. Before doing this, a copy * of it is created in the scratch space which is used later on to * save it to pt_regs. */ std r3, 8(r1) REST_GPR(3, r1) /* Save resulting GPR state to pt_regs */ subi r3, r3, GPR0 SAVE_GPR(0, r3) SAVE_GPR(2, r3) SAVE_GPRS(4, 12, r3) SAVE_NVGPRS(r3) /* Save resulting LR to pt_regs */ mflr r0 std r0, _LINK(r3) /* Save resulting CR to pt_regs */ mfcr r0 std r0, _CCR(r3) /* Save resulting XER to pt_regs */ mfxer r0 std r0, _XER(r3) /* Restore resulting GPR3 from scratch space and save it to pt_regs */ ld r0, 8(r1) std r0, GPR3(r3) /* Set return value to denote execution success */ li r3, 0 /* Continue */ b 3f /* Set return value to denote execution failure */ 2: li r3, -EFAULT /* Restore the non-volatile GPRs from stack */ 3: REST_GPR(2, r1) REST_NVGPRS(r1) /* Restore LR from stack to be able to return */ ld r0, _LINK(r1) mtlr r0 /* Restore CR from stack */ ld r0, _CCR(r1) mtcr r0 /* Tear down stack frame */ addi r1, r1, INT_FRAME_SIZE /* Return */ blr /* Setup exception table */ EX_TABLE(1b, 2b) _ASM_NOKPROBE_SYMBOL(exec_instr)
aixcc-public/challenge-001-exemplar-source
10,214
arch/powerpc/lib/memcpy_power7.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * * Copyright (C) IBM Corporation, 2012 * * Author: Anton Blanchard <anton@au.ibm.com> */ #include <asm/ppc_asm.h> #ifndef SELFTEST_CASE /* 0 == don't use VMX, 1 == use VMX */ #define SELFTEST_CASE 0 #endif #ifdef __BIG_ENDIAN__ #define LVS(VRT,RA,RB) lvsl VRT,RA,RB #define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC #else #define LVS(VRT,RA,RB) lvsr VRT,RA,RB #define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC #endif _GLOBAL(memcpy_power7) cmpldi r5,16 cmpldi cr1,r5,4096 std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) blt .Lshort_copy #ifdef CONFIG_ALTIVEC test_feature = SELFTEST_CASE BEGIN_FTR_SECTION bgt cr1, .Lvmx_copy END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif .Lnonvmx_copy: /* Get the source 8B aligned */ neg r6,r4 mtocrf 0x01,r6 clrldi r6,r6,(64-3) bf cr7*4+3,1f lbz r0,0(r4) addi r4,r4,1 stb r0,0(r3) addi r3,r3,1 1: bf cr7*4+2,2f lhz r0,0(r4) addi r4,r4,2 sth r0,0(r3) addi r3,r3,2 2: bf cr7*4+1,3f lwz r0,0(r4) addi r4,r4,4 stw r0,0(r3) addi r3,r3,4 3: sub r5,r5,r6 cmpldi r5,128 blt 5f mflr r0 stdu r1,-STACKFRAMESIZE(r1) std r14,STK_REG(R14)(r1) std r15,STK_REG(R15)(r1) std r16,STK_REG(R16)(r1) std r17,STK_REG(R17)(r1) std r18,STK_REG(R18)(r1) std r19,STK_REG(R19)(r1) std r20,STK_REG(R20)(r1) std r21,STK_REG(R21)(r1) std r22,STK_REG(R22)(r1) std r0,STACKFRAMESIZE+16(r1) srdi r6,r5,7 mtctr r6 /* Now do cacheline (128B) sized loads and stores. */ .align 5 4: ld r0,0(r4) ld r6,8(r4) ld r7,16(r4) ld r8,24(r4) ld r9,32(r4) ld r10,40(r4) ld r11,48(r4) ld r12,56(r4) ld r14,64(r4) ld r15,72(r4) ld r16,80(r4) ld r17,88(r4) ld r18,96(r4) ld r19,104(r4) ld r20,112(r4) ld r21,120(r4) addi r4,r4,128 std r0,0(r3) std r6,8(r3) std r7,16(r3) std r8,24(r3) std r9,32(r3) std r10,40(r3) std r11,48(r3) std r12,56(r3) std r14,64(r3) std r15,72(r3) std r16,80(r3) std r17,88(r3) std r18,96(r3) std r19,104(r3) std r20,112(r3) std r21,120(r3) addi r3,r3,128 bdnz 4b clrldi r5,r5,(64-7) ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) ld r17,STK_REG(R17)(r1) ld r18,STK_REG(R18)(r1) ld r19,STK_REG(R19)(r1) ld r20,STK_REG(R20)(r1) ld r21,STK_REG(R21)(r1) ld r22,STK_REG(R22)(r1) addi r1,r1,STACKFRAMESIZE /* Up to 127B to go */ 5: srdi r6,r5,4 mtocrf 0x01,r6 6: bf cr7*4+1,7f ld r0,0(r4) ld r6,8(r4) ld r7,16(r4) ld r8,24(r4) ld r9,32(r4) ld r10,40(r4) ld r11,48(r4) ld r12,56(r4) addi r4,r4,64 std r0,0(r3) std r6,8(r3) std r7,16(r3) std r8,24(r3) std r9,32(r3) std r10,40(r3) std r11,48(r3) std r12,56(r3) addi r3,r3,64 /* Up to 63B to go */ 7: bf cr7*4+2,8f ld r0,0(r4) ld r6,8(r4) ld r7,16(r4) ld r8,24(r4) addi r4,r4,32 std r0,0(r3) std r6,8(r3) std r7,16(r3) std r8,24(r3) addi r3,r3,32 /* Up to 31B to go */ 8: bf cr7*4+3,9f ld r0,0(r4) ld r6,8(r4) addi r4,r4,16 std r0,0(r3) std r6,8(r3) addi r3,r3,16 9: clrldi r5,r5,(64-4) /* Up to 15B to go */ .Lshort_copy: mtocrf 0x01,r5 bf cr7*4+0,12f lwz r0,0(r4) /* Less chance of a reject with word ops */ lwz r6,4(r4) addi r4,r4,8 stw r0,0(r3) stw r6,4(r3) addi r3,r3,8 12: bf cr7*4+1,13f lwz r0,0(r4) addi r4,r4,4 stw r0,0(r3) addi r3,r3,4 13: bf cr7*4+2,14f lhz r0,0(r4) addi r4,r4,2 sth r0,0(r3) addi r3,r3,2 14: bf cr7*4+3,15f lbz r0,0(r4) stb r0,0(r3) 15: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) blr .Lunwind_stack_nonvmx_copy: addi r1,r1,STACKFRAMESIZE b .Lnonvmx_copy .Lvmx_copy: #ifdef CONFIG_ALTIVEC mflr r0 std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl enter_vmx_ops cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) ld 
r3,STK_REG(R31)(r1) ld r4,STK_REG(R30)(r1) ld r5,STK_REG(R29)(r1) mtlr r0 /* * We prefetch both the source and destination using enhanced touch * instructions. We use a stream ID of 0 for the load side and * 1 for the store side. */ clrrdi r6,r4,7 clrrdi r9,r3,7 ori r9,r9,1 /* stream=1 */ srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */ cmpldi r7,0x3FF ble 1f li r7,0x3FF 1: lis r0,0x0E00 /* depth=7 */ sldi r7,r7,7 or r7,r7,r0 ori r10,r7,1 /* stream=1 */ lis r8,0x8000 /* GO=1 */ clrldi r8,r8,32 dcbt 0,r6,0b01000 dcbt 0,r7,0b01010 dcbtst 0,r9,0b01000 dcbtst 0,r10,0b01010 eieio dcbt 0,r8,0b01010 /* GO */ beq cr1,.Lunwind_stack_nonvmx_copy /* * If source and destination are not relatively aligned we use a * slower permute loop. */ xor r6,r4,r3 rldicl. r6,r6,0,(64-4) bne .Lvmx_unaligned_copy /* Get the destination 16B aligned */ neg r6,r3 mtocrf 0x01,r6 clrldi r6,r6,(64-4) bf cr7*4+3,1f lbz r0,0(r4) addi r4,r4,1 stb r0,0(r3) addi r3,r3,1 1: bf cr7*4+2,2f lhz r0,0(r4) addi r4,r4,2 sth r0,0(r3) addi r3,r3,2 2: bf cr7*4+1,3f lwz r0,0(r4) addi r4,r4,4 stw r0,0(r3) addi r3,r3,4 3: bf cr7*4+0,4f ld r0,0(r4) addi r4,r4,8 std r0,0(r3) addi r3,r3,8 4: sub r5,r5,r6 /* Get the desination 128B aligned */ neg r6,r3 srdi r7,r6,4 mtocrf 0x01,r7 clrldi r6,r6,(64-7) li r9,16 li r10,32 li r11,48 bf cr7*4+3,5f lvx v1,0,r4 addi r4,r4,16 stvx v1,0,r3 addi r3,r3,16 5: bf cr7*4+2,6f lvx v1,0,r4 lvx v0,r4,r9 addi r4,r4,32 stvx v1,0,r3 stvx v0,r3,r9 addi r3,r3,32 6: bf cr7*4+1,7f lvx v3,0,r4 lvx v2,r4,r9 lvx v1,r4,r10 lvx v0,r4,r11 addi r4,r4,64 stvx v3,0,r3 stvx v2,r3,r9 stvx v1,r3,r10 stvx v0,r3,r11 addi r3,r3,64 7: sub r5,r5,r6 srdi r6,r5,7 std r14,STK_REG(R14)(r1) std r15,STK_REG(R15)(r1) std r16,STK_REG(R16)(r1) li r12,64 li r14,80 li r15,96 li r16,112 mtctr r6 /* * Now do cacheline sized loads and stores. By this stage the * cacheline stores are also cacheline aligned. 
*/ .align 5 8: lvx v7,0,r4 lvx v6,r4,r9 lvx v5,r4,r10 lvx v4,r4,r11 lvx v3,r4,r12 lvx v2,r4,r14 lvx v1,r4,r15 lvx v0,r4,r16 addi r4,r4,128 stvx v7,0,r3 stvx v6,r3,r9 stvx v5,r3,r10 stvx v4,r3,r11 stvx v3,r3,r12 stvx v2,r3,r14 stvx v1,r3,r15 stvx v0,r3,r16 addi r3,r3,128 bdnz 8b ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) /* Up to 127B to go */ clrldi r5,r5,(64-7) srdi r6,r5,4 mtocrf 0x01,r6 bf cr7*4+1,9f lvx v3,0,r4 lvx v2,r4,r9 lvx v1,r4,r10 lvx v0,r4,r11 addi r4,r4,64 stvx v3,0,r3 stvx v2,r3,r9 stvx v1,r3,r10 stvx v0,r3,r11 addi r3,r3,64 9: bf cr7*4+2,10f lvx v1,0,r4 lvx v0,r4,r9 addi r4,r4,32 stvx v1,0,r3 stvx v0,r3,r9 addi r3,r3,32 10: bf cr7*4+3,11f lvx v1,0,r4 addi r4,r4,16 stvx v1,0,r3 addi r3,r3,16 /* Up to 15B to go */ 11: clrldi r5,r5,(64-4) mtocrf 0x01,r5 bf cr7*4+0,12f ld r0,0(r4) addi r4,r4,8 std r0,0(r3) addi r3,r3,8 12: bf cr7*4+1,13f lwz r0,0(r4) addi r4,r4,4 stw r0,0(r3) addi r3,r3,4 13: bf cr7*4+2,14f lhz r0,0(r4) addi r4,r4,2 sth r0,0(r3) addi r3,r3,2 14: bf cr7*4+3,15f lbz r0,0(r4) stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) b exit_vmx_ops /* tail call optimise */ .Lvmx_unaligned_copy: /* Get the destination 16B aligned */ neg r6,r3 mtocrf 0x01,r6 clrldi r6,r6,(64-4) bf cr7*4+3,1f lbz r0,0(r4) addi r4,r4,1 stb r0,0(r3) addi r3,r3,1 1: bf cr7*4+2,2f lhz r0,0(r4) addi r4,r4,2 sth r0,0(r3) addi r3,r3,2 2: bf cr7*4+1,3f lwz r0,0(r4) addi r4,r4,4 stw r0,0(r3) addi r3,r3,4 3: bf cr7*4+0,4f lwz r0,0(r4) /* Less chance of a reject with word ops */ lwz r7,4(r4) addi r4,r4,8 stw r0,0(r3) stw r7,4(r3) addi r3,r3,8 4: sub r5,r5,r6 /* Get the desination 128B aligned */ neg r6,r3 srdi r7,r6,4 mtocrf 0x01,r7 clrldi r6,r6,(64-7) li r9,16 li r10,32 li r11,48 LVS(v16,0,r4) /* Setup permute control vector */ lvx v0,0,r4 addi r4,r4,16 bf cr7*4+3,5f lvx v1,0,r4 VPERM(v8,v0,v1,v16) addi r4,r4,16 stvx v8,0,r3 addi r3,r3,16 vor v0,v1,v1 5: bf cr7*4+2,6f lvx v1,0,r4 VPERM(v8,v0,v1,v16) lvx v0,r4,r9 VPERM(v9,v1,v0,v16) addi r4,r4,32 stvx v8,0,r3 stvx v9,r3,r9 addi r3,r3,32 6: bf cr7*4+1,7f lvx v3,0,r4 VPERM(v8,v0,v3,v16) lvx v2,r4,r9 VPERM(v9,v3,v2,v16) lvx v1,r4,r10 VPERM(v10,v2,v1,v16) lvx v0,r4,r11 VPERM(v11,v1,v0,v16) addi r4,r4,64 stvx v8,0,r3 stvx v9,r3,r9 stvx v10,r3,r10 stvx v11,r3,r11 addi r3,r3,64 7: sub r5,r5,r6 srdi r6,r5,7 std r14,STK_REG(R14)(r1) std r15,STK_REG(R15)(r1) std r16,STK_REG(R16)(r1) li r12,64 li r14,80 li r15,96 li r16,112 mtctr r6 /* * Now do cacheline sized loads and stores. By this stage the * cacheline stores are also cacheline aligned. 
*/ .align 5 8: lvx v7,0,r4 VPERM(v8,v0,v7,v16) lvx v6,r4,r9 VPERM(v9,v7,v6,v16) lvx v5,r4,r10 VPERM(v10,v6,v5,v16) lvx v4,r4,r11 VPERM(v11,v5,v4,v16) lvx v3,r4,r12 VPERM(v12,v4,v3,v16) lvx v2,r4,r14 VPERM(v13,v3,v2,v16) lvx v1,r4,r15 VPERM(v14,v2,v1,v16) lvx v0,r4,r16 VPERM(v15,v1,v0,v16) addi r4,r4,128 stvx v8,0,r3 stvx v9,r3,r9 stvx v10,r3,r10 stvx v11,r3,r11 stvx v12,r3,r12 stvx v13,r3,r14 stvx v14,r3,r15 stvx v15,r3,r16 addi r3,r3,128 bdnz 8b ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) /* Up to 127B to go */ clrldi r5,r5,(64-7) srdi r6,r5,4 mtocrf 0x01,r6 bf cr7*4+1,9f lvx v3,0,r4 VPERM(v8,v0,v3,v16) lvx v2,r4,r9 VPERM(v9,v3,v2,v16) lvx v1,r4,r10 VPERM(v10,v2,v1,v16) lvx v0,r4,r11 VPERM(v11,v1,v0,v16) addi r4,r4,64 stvx v8,0,r3 stvx v9,r3,r9 stvx v10,r3,r10 stvx v11,r3,r11 addi r3,r3,64 9: bf cr7*4+2,10f lvx v1,0,r4 VPERM(v8,v0,v1,v16) lvx v0,r4,r9 VPERM(v9,v1,v0,v16) addi r4,r4,32 stvx v8,0,r3 stvx v9,r3,r9 addi r3,r3,32 10: bf cr7*4+3,11f lvx v1,0,r4 VPERM(v8,v0,v1,v16) addi r4,r4,16 stvx v8,0,r3 addi r3,r3,16 /* Up to 15B to go */ 11: clrldi r5,r5,(64-4) addi r4,r4,-16 /* Unwind the +16 load offset */ mtocrf 0x01,r5 bf cr7*4+0,12f lwz r0,0(r4) /* Less chance of a reject with word ops */ lwz r6,4(r4) addi r4,r4,8 stw r0,0(r3) stw r6,4(r3) addi r3,r3,8 12: bf cr7*4+1,13f lwz r0,0(r4) addi r4,r4,4 stw r0,0(r3) addi r3,r3,4 13: bf cr7*4+2,14f lhz r0,0(r4) addi r4,r4,2 sth r0,0(r3) addi r3,r3,2 14: bf cr7*4+3,15f lbz r0,0(r4) stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) b exit_vmx_ops /* tail call optimise */ #endif /* CONFIG_ALTIVEC */
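The interesting part of the VMX path is .Lvmx_unaligned_copy: the source is read with aligned 16-byte loads and each store operand is assembled from two consecutive loads with vperm, steered by the control vector that lvsl/lvsr produce. The scalar sketch below shows the same idea with 64-bit words, assuming big-endian lane order (the LVS/VPERM macros at the top of the file swap operands for little-endian); like the vector code it reads slightly past the end of the source, which a real implementation must account for. The function name is illustrative.

#include <stdint.h>
#include <stddef.h>

/* Copy nwords doublewords from an arbitrarily aligned source using only
 * aligned loads: each output word combines the previous and current
 * aligned load, shifted by the source misalignment. */
static void copy_realign(uint64_t *dst, const unsigned char *src,
                         size_t nwords)
{
        size_t off = (uintptr_t)src & 7;        /* source misalignment */
        const uint64_t *s = (const uint64_t *)(src - off);
        uint64_t prev = *s++;
        size_t i;

        for (i = 0; i < nwords; i++) {
                uint64_t next = *s++;

                dst[i] = off ? (prev << (8 * off)) |
                               (next >> (8 * (8 - off)))
                             : prev;
                prev = next;
        }
}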
aixcc-public/challenge-001-exemplar-source
4,272
arch/powerpc/lib/memcpy_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2002 Paul Mackerras, IBM Corp. */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/export.h> #include <asm/asm-compat.h> #include <asm/feature-fixups.h> #include <asm/kasan.h> #ifndef SELFTEST_CASE /* For big-endian, 0 == most CPUs, 1 == POWER6, 2 == Cell */ #define SELFTEST_CASE 0 #endif .align 7 _GLOBAL_TOC_KASAN(memcpy) BEGIN_FTR_SECTION #ifdef __LITTLE_ENDIAN__ cmpdi cr7,r5,0 #else std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* save destination pointer for return value */ #endif FTR_SECTION_ELSE #ifdef CONFIG_PPC_BOOK3S_64 b memcpy_power7 #endif ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) #ifdef __LITTLE_ENDIAN__ /* dumb little-endian memcpy that will get replaced at runtime */ addi r9,r3,-1 addi r4,r4,-1 beqlr cr7 mtctr r5 1: lbzu r10,1(r4) stbu r10,1(r9) bdnz 1b blr #else PPC_MTOCRF(0x01,r5) cmpldi cr1,r5,16 neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry andi. r6,r6,7 dcbt 0,r4 blt cr1,.Lshort_copy /* Below we want to nop out the bne if we're on a CPU that has the CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit cleared. At the time of writing the only CPU that has this combination of bits set is Power6. */ test_feature = (SELFTEST_CASE == 1) BEGIN_FTR_SECTION nop FTR_SECTION_ELSE bne .Ldst_unaligned ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \ CPU_FTR_UNALIGNED_LD_STD) .Ldst_aligned: addi r3,r3,-16 test_feature = (SELFTEST_CASE == 0) BEGIN_FTR_SECTION andi. r0,r4,7 bne .Lsrc_unaligned END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) srdi r7,r5,4 ld r9,0(r4) addi r4,r4,-8 mtctr r7 andi. r5,r5,7 bf cr7*4+0,2f addi r3,r3,8 addi r4,r4,8 mr r8,r9 blt cr1,3f 1: ld r9,8(r4) std r8,8(r3) 2: ldu r8,16(r4) stdu r9,16(r3) bdnz 1b 3: std r8,8(r3) beq 3f addi r3,r3,16 .Ldo_tail: bf cr7*4+1,1f lwz r9,8(r4) addi r4,r4,4 stw r9,0(r3) addi r3,r3,4 1: bf cr7*4+2,2f lhz r9,8(r4) addi r4,r4,2 sth r9,0(r3) addi r3,r3,2 2: bf cr7*4+3,3f lbz r9,8(r4) stb r9,0(r3) 3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ blr .Lsrc_unaligned: srdi r6,r5,3 addi r5,r5,-16 subf r4,r0,r4 srdi r7,r5,4 sldi r10,r0,3 cmpdi cr6,r6,3 andi. 
r5,r5,7 mtctr r7 subfic r11,r10,64 add r5,r5,r0 bt cr7*4+0,0f ld r9,0(r4) # 3+2n loads, 2+2n stores ld r0,8(r4) sld r6,r9,r10 ldu r9,16(r4) srd r7,r0,r11 sld r8,r0,r10 or r7,r7,r6 blt cr6,4f ld r0,8(r4) # s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12 b 2f 0: ld r0,0(r4) # 4+2n loads, 3+2n stores ldu r9,8(r4) sld r8,r0,r10 addi r3,r3,-8 blt cr6,5f ld r0,8(r4) srd r12,r9,r11 sld r6,r9,r10 ldu r9,16(r4) or r12,r8,r12 srd r7,r0,r11 sld r8,r0,r10 addi r3,r3,16 beq cr6,3f # d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9 1: or r7,r7,r6 ld r0,8(r4) std r12,8(r3) 2: srd r12,r9,r11 sld r6,r9,r10 ldu r9,16(r4) or r12,r8,r12 stdu r7,16(r3) srd r7,r0,r11 sld r8,r0,r10 bdnz 1b 3: std r12,8(r3) or r7,r7,r6 4: std r7,16(r3) 5: srd r12,r9,r11 or r12,r8,r12 std r12,24(r3) beq 4f cmpwi cr1,r5,8 addi r3,r3,32 sld r9,r9,r10 ble cr1,6f ld r0,8(r4) srd r7,r0,r11 or r9,r7,r9 6: bf cr7*4+1,1f rotldi r9,r9,32 stw r9,0(r3) addi r3,r3,4 1: bf cr7*4+2,2f rotldi r9,r9,16 sth r9,0(r3) addi r3,r3,2 2: bf cr7*4+3,3f rotldi r9,r9,8 stb r9,0(r3) 3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ blr .Ldst_unaligned: PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7 subf r5,r6,r5 li r7,0 cmpldi cr1,r5,16 bf cr7*4+3,1f lbz r0,0(r4) stb r0,0(r3) addi r7,r7,1 1: bf cr7*4+2,2f lhzx r0,r7,r4 sthx r0,r7,r3 addi r7,r7,2 2: bf cr7*4+1,3f lwzx r0,r7,r4 stwx r0,r7,r3 3: PPC_MTOCRF(0x01,r5) add r4,r6,r4 add r3,r6,r3 b .Ldst_aligned .Lshort_copy: bf cr7*4+0,1f lwz r0,0(r4) lwz r9,4(r4) addi r4,r4,8 stw r0,0(r3) stw r9,4(r3) addi r3,r3,8 1: bf cr7*4+1,2f lwz r0,0(r4) addi r4,r4,4 stw r0,0(r3) addi r3,r3,4 2: bf cr7*4+2,3f lhz r0,0(r4) addi r4,r4,2 sth r0,0(r3) addi r3,r3,2 3: bf cr7*4+3,4f lbz r0,0(r4) stb r0,0(r3) 4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ blr #endif EXPORT_SYMBOL(memcpy) EXPORT_SYMBOL_KASAN(memcpy)
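A recurring idiom in these routines is the branch-per-bit tail: PPC_MTOCRF moves the low bits of the residual length into a condition-register field, and each bit then gates exactly one power-of-two sized move, as in .Ldo_tail and .Lshort_copy above. A C rendering of that dispatch for a sub-16-byte tail follows; the function name is illustrative.

#include <string.h>
#include <stddef.h>

/* Copy the last n (< 16) bytes without a loop: each bit of n selects
 * one move, mirroring the bf/bt tests on cr7. */
static void tail_copy(unsigned char *d, const unsigned char *s, size_t n)
{
        if (n & 8) { memcpy(d, s, 8); d += 8; s += 8; }
        if (n & 4) { memcpy(d, s, 4); d += 4; s += 4; }
        if (n & 2) { memcpy(d, s, 2); d += 2; s += 2; }
        if (n & 1) { *d = *s; }
}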
aixcc-public/challenge-001-exemplar-source
2,721
arch/powerpc/lib/string_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * * Copyright (C) IBM Corporation, 2012 * * Author: Anton Blanchard <anton@au.ibm.com> */ #include <asm/ppc_asm.h> #include <asm/linkage.h> #include <asm/asm-offsets.h> #include <asm/export.h> /** * __arch_clear_user: - Zero a block of memory in user space, with less checking. * @to: Destination address, in user space. * @n: Number of bytes to zero. * * Zero a block of memory in user space. Caller must check * the specified block with access_ok() before calling this function. * * Returns number of bytes that could not be cleared. * On success, this will be zero. */ .macro err1 100: EX_TABLE(100b,.Ldo_err1) .endm .macro err2 200: EX_TABLE(200b,.Ldo_err2) .endm .macro err3 300: EX_TABLE(300b,.Ldo_err3) .endm .Ldo_err1: mr r3,r8 .Ldo_err2: mtctr r4 1: err3; stb r0,0(r3) addi r3,r3,1 addi r4,r4,-1 bdnz 1b .Ldo_err3: mr r3,r4 blr _GLOBAL_TOC(__arch_clear_user) cmpdi r4,32 neg r6,r3 li r0,0 blt .Lshort_clear mr r8,r3 mtocrf 0x01,r6 clrldi r6,r6,(64-3) /* Get the destination 8 byte aligned */ bf cr7*4+3,1f err1; stb r0,0(r3) addi r3,r3,1 1: bf cr7*4+2,2f err1; sth r0,0(r3) addi r3,r3,2 2: bf cr7*4+1,3f err1; stw r0,0(r3) addi r3,r3,4 3: sub r4,r4,r6 cmpdi r4,32 cmpdi cr1,r4,512 blt .Lshort_clear bgt cr1,.Llong_clear .Lmedium_clear: srdi r6,r4,5 mtctr r6 /* Do 32 byte chunks */ 4: err2; std r0,0(r3) err2; std r0,8(r3) err2; std r0,16(r3) err2; std r0,24(r3) addi r3,r3,32 addi r4,r4,-32 bdnz 4b .Lshort_clear: /* up to 31 bytes to go */ cmpdi r4,16 blt 6f err2; std r0,0(r3) err2; std r0,8(r3) addi r3,r3,16 addi r4,r4,-16 /* Up to 15 bytes to go */ 6: mr r8,r3 clrldi r4,r4,(64-4) mtocrf 0x01,r4 bf cr7*4+0,7f err1; std r0,0(r3) addi r3,r3,8 7: bf cr7*4+1,8f err1; stw r0,0(r3) addi r3,r3,4 8: bf cr7*4+2,9f err1; sth r0,0(r3) addi r3,r3,2 9: bf cr7*4+3,10f err1; stb r0,0(r3) 10: li r3,0 blr .Llong_clear: LOAD_REG_ADDR(r5, ppc64_caches) bf cr7*4+0,11f err2; std r0,0(r3) addi r3,r3,8 addi r4,r4,-8 /* Destination is 16 byte aligned, need to get it cache block aligned */ 11: lwz r7,DCACHEL1LOGBLOCKSIZE(r5) lwz r9,DCACHEL1BLOCKSIZE(r5) /* * With worst case alignment the long clear loop takes a minimum * of 1 byte less than 2 cachelines. */ sldi r10,r9,2 cmpd r4,r10 blt .Lmedium_clear neg r6,r3 addi r10,r9,-1 and. r5,r6,r10 beq 13f srdi r6,r5,4 mtctr r6 mr r8,r3 12: err1; std r0,0(r3) err1; std r0,8(r3) addi r3,r3,16 bdnz 12b sub r4,r4,r5 13: srd r6,r4,r7 mtctr r6 mr r8,r3 14: err1; dcbz 0,r3 add r3,r3,r9 bdnz 14b and r4,r4,r10 cmpdi r4,32 blt .Lshort_clear b .Lmedium_clear EXPORT_SYMBOL(__arch_clear_user)
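The .Llong_clear path stores only until the destination reaches a cache-block boundary, then clears whole blocks with dcbz, which zeroes an entire data-cache block in one instruction but requires a cacheable, block-aligned target. Below is a C sketch of that split, with memset standing in for both the alignment stores and dcbz; 'block' must be a power of two, as DCACHEL1BLOCKSIZE is, and the function name is illustrative.

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Zero n bytes at p: head stores up to the block boundary, one "dcbz"
 * per whole block, then the tail. */
static void long_clear(unsigned char *p, size_t n, size_t block)
{
        size_t head = (size_t)(-(uintptr_t)p) & (block - 1);

        if (head > n)
                head = n;
        memset(p, 0, head);             /* align to the block boundary */
        p += head;
        n -= head;
        while (n >= block) {            /* dcbz per block in the real code */
                memset(p, 0, block);
                p += block;
                n -= block;
        }
        memset(p, 0, n);                /* tail */
}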
aixcc-public/challenge-001-exemplar-source
2,834
arch/powerpc/lib/copypage_power7.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * * Copyright (C) IBM Corporation, 2012 * * Author: Anton Blanchard <anton@au.ibm.com> */ #include <asm/page.h> #include <asm/ppc_asm.h> _GLOBAL(copypage_power7) /* * We prefetch both the source and destination using enhanced touch * instructions. We use a stream ID of 0 for the load side and * 1 for the store side. Since source and destination are page * aligned we don't need to clear the bottom 7 bits of either * address. */ ori r9,r3,1 /* stream=1 => to */ #ifdef CONFIG_PPC_64K_PAGES lis r7,0x0E01 /* depth=7 * units/cachelines=512 */ #else lis r7,0x0E00 /* depth=7 */ ori r7,r7,0x1000 /* units/cachelines=32 */ #endif ori r10,r7,1 /* stream=1 */ lis r8,0x8000 /* GO=1 */ clrldi r8,r8,32 /* setup read stream 0 */ dcbt 0,r4,0b01000 /* addr from */ dcbt 0,r7,0b01010 /* length and depth from */ /* setup write stream 1 */ dcbtst 0,r9,0b01000 /* addr to */ dcbtst 0,r10,0b01010 /* length and depth to */ eieio dcbt 0,r8,0b01010 /* all streams GO */ #ifdef CONFIG_ALTIVEC mflr r0 std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl enter_vmx_ops cmpwi r3,0 ld r0,STACKFRAMESIZE+16(r1) ld r3,STK_REG(R31)(r1) ld r4,STK_REG(R30)(r1) mtlr r0 li r0,(PAGE_SIZE/128) mtctr r0 beq .Lnonvmx_copy addi r1,r1,STACKFRAMESIZE li r6,16 li r7,32 li r8,48 li r9,64 li r10,80 li r11,96 li r12,112 .align 5 1: lvx v7,0,r4 lvx v6,r4,r6 lvx v5,r4,r7 lvx v4,r4,r8 lvx v3,r4,r9 lvx v2,r4,r10 lvx v1,r4,r11 lvx v0,r4,r12 addi r4,r4,128 stvx v7,0,r3 stvx v6,r3,r6 stvx v5,r3,r7 stvx v4,r3,r8 stvx v3,r3,r9 stvx v2,r3,r10 stvx v1,r3,r11 stvx v0,r3,r12 addi r3,r3,128 bdnz 1b b exit_vmx_ops /* tail call optimise */ #else li r0,(PAGE_SIZE/128) mtctr r0 stdu r1,-STACKFRAMESIZE(r1) #endif .Lnonvmx_copy: std r14,STK_REG(R14)(r1) std r15,STK_REG(R15)(r1) std r16,STK_REG(R16)(r1) std r17,STK_REG(R17)(r1) std r18,STK_REG(R18)(r1) std r19,STK_REG(R19)(r1) std r20,STK_REG(R20)(r1) 1: ld r0,0(r4) ld r5,8(r4) ld r6,16(r4) ld r7,24(r4) ld r8,32(r4) ld r9,40(r4) ld r10,48(r4) ld r11,56(r4) ld r12,64(r4) ld r14,72(r4) ld r15,80(r4) ld r16,88(r4) ld r17,96(r4) ld r18,104(r4) ld r19,112(r4) ld r20,120(r4) addi r4,r4,128 std r0,0(r3) std r5,8(r3) std r6,16(r3) std r7,24(r3) std r8,32(r3) std r9,40(r3) std r10,48(r3) std r11,56(r3) std r12,64(r3) std r14,72(r3) std r15,80(r3) std r16,88(r3) std r17,96(r3) std r18,104(r3) std r19,112(r3) std r20,120(r3) addi r3,r3,128 bdnz 1b ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) ld r17,STK_REG(R17)(r1) ld r18,STK_REG(R18)(r1) ld r19,STK_REG(R19)(r1) ld r20,STK_REG(R20)(r1) addi r1,r1,STACKFRAMESIZE blr
aixcc-public/challenge-001-exemplar-source
2,691
arch/powerpc/lib/strlen_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * strlen() for PPC32 * * Copyright (C) 2018 Christophe Leroy CS Systemes d'Information. * * Inspired from glibc implementation */ #include <asm/ppc_asm.h> #include <asm/export.h> #include <asm/cache.h> .text /* * Algorithm: * * 1) Given a word 'x', we can test to see if it contains any 0 bytes * by subtracting 0x01010101, and seeing if any of the high bits of each * byte changed from 0 to 1. This works because the least significant * 0 byte must have had no incoming carry (otherwise it's not the least * significant), so it is 0x00 - 0x01 == 0xff. For all other * byte values, either they have the high bit set initially, or when * 1 is subtracted you get a value in the range 0x00-0x7f, none of which * have their high bit set. The expression here is * ((x - 0x01010101) & ~x & 0x80808080), which gives 0x00000000 when * there were no 0x00 bytes in the word. You get 0x80 in bytes that * match, but possibly false 0x80 matches in the next more significant * byte to a true match due to carries. For little-endian this is * of no consequence since the least significant match is the one * we're interested in, but big-endian needs method 2 to find which * byte matches. * 2) Given a word 'x', we can test to see _which_ byte was zero by * calculating ~(((x & ~0x80808080) - 0x80808080 - 1) | x | ~0x80808080). * This produces 0x80 in each byte that was zero, and 0x00 in all * the other bytes. The '| ~0x80808080' clears the low 7 bits in each * byte, and the '| x' part ensures that bytes with the high bit set * produce 0x00. The addition will carry into the high bit of each byte * iff that byte had one of its low 7 bits set. We can then just see * which was the most significant bit set and divide by 8 to find how * many to add to the index. * This is from the book 'The PowerPC Compiler Writer's Guide', * by Steve Hoxey, Faraydon Karim, Bill Hay and Hank Warren. */ _GLOBAL(strlen) andi. r0, r3, 3 lis r7, 0x0101 addi r10, r3, -4 addic r7, r7, 0x0101 /* r7 = 0x01010101 (lomagic) & clear XER[CA] */ rotlwi r6, r7, 31 /* r6 = 0x80808080 (himagic) */ bne- 3f .balign IFETCH_ALIGN_BYTES 1: lwzu r9, 4(r10) 2: subf r8, r7, r9 and. r8, r8, r6 beq+ 1b andc. r8, r8, r9 beq+ 1b andc r8, r9, r6 orc r9, r9, r6 subfe r8, r6, r8 nor r8, r8, r9 cntlzw r8, r8 subf r3, r3, r10 srwi r8, r8, 3 add r3, r3, r8 blr /* Misaligned string: make sure bytes before the string are not seen as 0 */ 3: xor r10, r10, r0 orc r8, r8, r8 lwzu r9, 4(r10) slwi r0, r0, 3 srw r8, r8, r0 orc r9, r9, r8 b 2b EXPORT_SYMBOL(strlen)
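Both tests in the algorithm comment are easy to check in C with the same lomagic/himagic constants the assembly keeps in r7 and r6: method 1 is the cheap per-word filter, and method 2 pinpoints the matching byte for the big-endian case. The function names below are illustrative.

#include <stdint.h>
#include <stdio.h>

#define LOMAGIC 0x01010101u
#define HIMAGIC 0x80808080u

/* Method 1: nonzero iff some byte of x is 0x00 (may flag extra bytes
 * above the true match, as the comment notes). */
static int has_zero_byte(uint32_t x)
{
        return ((x - LOMAGIC) & ~x & HIMAGIC) != 0;
}

/* Method 2: exactly 0x80 in each byte of x that is 0x00. */
static uint32_t zero_byte_mask(uint32_t x)
{
        return ~(((x & ~HIMAGIC) - HIMAGIC - 1) | x | ~HIMAGIC);
}

int main(void)
{
        uint32_t w = 0x41004242;  /* 'A', NUL, 'B', 'B' in big-endian order */

        printf("%d 0x%08x\n", has_zero_byte(w), zero_byte_mask(w));
        /* prints "1 0x00800000": cntlzw(0x00800000)/8 = 1 locates byte 1 */
        return 0;
}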
aixcc-public/challenge-001-exemplar-source
1,472
arch/powerpc/lib/string_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * String handling functions for PowerPC32 * * Copyright (C) 1996 Paul Mackerras. * */ #include <asm/ppc_asm.h> #include <asm/export.h> #include <asm/cache.h> .text CACHELINE_BYTES = L1_CACHE_BYTES LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) _GLOBAL(__arch_clear_user) /* * Use dcbz on the complete cache lines in the destination * to set them to zero. This requires that the destination * area is cacheable. */ cmplwi cr0, r4, 4 mr r10, r3 li r3, 0 blt 7f 11: stw r3, 0(r10) beqlr andi. r0, r10, 3 add r11, r0, r4 subf r6, r0, r10 clrlwi r7, r6, 32 - LG_CACHELINE_BYTES add r8, r7, r11 srwi r9, r8, LG_CACHELINE_BYTES addic. r9, r9, -1 /* total number of complete cachelines */ ble 2f xori r0, r7, CACHELINE_MASK & ~3 srwi. r0, r0, 2 beq 3f mtctr r0 4: stwu r3, 4(r6) bdnz 4b 3: mtctr r9 li r7, 4 10: dcbz r7, r6 addi r6, r6, CACHELINE_BYTES bdnz 10b clrlwi r11, r8, 32 - LG_CACHELINE_BYTES addi r11, r11, 4 2: srwi r0, r11, 2 mtctr r0 bdz 6f 1: stwu r3, 4(r6) bdnz 1b 6: andi. r11, r11, 3 beqlr mtctr r11 addi r6, r6, 3 8: stbu r3, 1(r6) bdnz 8b blr 7: cmpwi cr0, r4, 0 beqlr mtctr r4 addi r6, r10, -1 9: stbu r3, 1(r6) bdnz 9b blr 90: mr r3, r4 blr 91: add r3, r10, r4 subf r3, r6, r3 blr EX_TABLE(11b, 90b) EX_TABLE(4b, 91b) EX_TABLE(10b, 91b) EX_TABLE(1b, 91b) EX_TABLE(8b, 91b) EX_TABLE(9b, 91b) EXPORT_SYMBOL(__arch_clear_user)
aixcc-public/challenge-001-exemplar-source
1,922
arch/powerpc/lib/copypage_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2008 Mark Nelson, IBM Corp. */ #include <asm/page.h> #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/feature-fixups.h> _GLOBAL_TOC(copy_page) BEGIN_FTR_SECTION lis r5,PAGE_SIZE@h FTR_SECTION_ELSE #ifdef CONFIG_PPC_BOOK3S_64 b copypage_power7 #endif ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) ori r5,r5,PAGE_SIZE@l BEGIN_FTR_SECTION LOAD_REG_ADDR(r10, ppc64_caches) lwz r11,DCACHEL1LOGBLOCKSIZE(r10) /* log2 of cache block size */ lwz r12,DCACHEL1BLOCKSIZE(r10) /* get cache block size */ li r9,0 srd r8,r5,r11 mtctr r8 .Lsetup: dcbt r9,r4 dcbz r9,r3 add r9,r9,r12 bdnz .Lsetup END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ) addi r3,r3,-8 srdi r8,r5,7 /* page is copied in 128 byte strides */ addi r8,r8,-1 /* one stride copied outside loop */ mtctr r8 ld r5,0(r4) ld r6,8(r4) ld r7,16(r4) ldu r8,24(r4) 1: std r5,8(r3) std r6,16(r3) ld r9,8(r4) ld r10,16(r4) std r7,24(r3) std r8,32(r3) ld r11,24(r4) ld r12,32(r4) std r9,40(r3) std r10,48(r3) ld r5,40(r4) ld r6,48(r4) std r11,56(r3) std r12,64(r3) ld r7,56(r4) ld r8,64(r4) std r5,72(r3) std r6,80(r3) ld r9,72(r4) ld r10,80(r4) std r7,88(r3) std r8,96(r3) ld r11,88(r4) ld r12,96(r4) std r9,104(r3) std r10,112(r3) ld r5,104(r4) ld r6,112(r4) std r11,120(r3) stdu r12,128(r3) ld r7,120(r4) ldu r8,128(r4) bdnz 1b std r5,8(r3) std r6,16(r3) ld r9,8(r4) ld r10,16(r4) std r7,24(r3) std r8,32(r3) ld r11,24(r4) ld r12,32(r4) std r9,40(r3) std r10,48(r3) ld r5,40(r4) ld r6,48(r4) std r11,56(r3) std r12,64(r3) ld r7,56(r4) ld r8,64(r4) std r5,72(r3) std r6,80(r3) ld r9,72(r4) ld r10,80(r4) std r7,88(r3) std r8,96(r3) ld r11,88(r4) ld r12,96(r4) std r9,104(r3) std r10,112(r3) std r11,120(r3) std r12,128(r3) blr EXPORT_SYMBOL(copy_page)
aixcc-public/challenge-001-exemplar-source
2,258
arch/powerpc/lib/mem_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * String handling functions for PowerPC. * * Copyright (C) 1996 Paul Mackerras. */ #include <asm/processor.h> #include <asm/errno.h> #include <asm/ppc_asm.h> #include <asm/export.h> #include <asm/kasan.h> #ifndef CONFIG_KASAN _GLOBAL(__memset16) rlwimi r4,r4,16,0,15 /* fall through */ _GLOBAL(__memset32) rldimi r4,r4,32,0 /* fall through */ _GLOBAL(__memset64) neg r0,r3 andi. r0,r0,7 cmplw cr1,r5,r0 b .Lms EXPORT_SYMBOL(__memset16) EXPORT_SYMBOL(__memset32) EXPORT_SYMBOL(__memset64) #endif _GLOBAL_KASAN(memset) neg r0,r3 rlwimi r4,r4,8,16,23 andi. r0,r0,7 /* # bytes to be 8-byte aligned */ rlwimi r4,r4,16,0,15 cmplw cr1,r5,r0 /* do we get that far? */ rldimi r4,r4,32,0 .Lms: PPC_MTOCRF(1,r0) mr r6,r3 blt cr1,8f beq 3f /* if already 8-byte aligned */ subf r5,r0,r5 bf 31,1f stb r4,0(r6) addi r6,r6,1 1: bf 30,2f sth r4,0(r6) addi r6,r6,2 2: bf 29,3f stw r4,0(r6) addi r6,r6,4 3: srdi. r0,r5,6 clrldi r5,r5,58 mtctr r0 beq 5f .balign 16 4: std r4,0(r6) std r4,8(r6) std r4,16(r6) std r4,24(r6) std r4,32(r6) std r4,40(r6) std r4,48(r6) std r4,56(r6) addi r6,r6,64 bdnz 4b 5: srwi. r0,r5,3 clrlwi r5,r5,29 PPC_MTOCRF(1,r0) beq 8f bf 29,6f std r4,0(r6) std r4,8(r6) std r4,16(r6) std r4,24(r6) addi r6,r6,32 6: bf 30,7f std r4,0(r6) std r4,8(r6) addi r6,r6,16 7: bf 31,8f std r4,0(r6) addi r6,r6,8 8: cmpwi r5,0 PPC_MTOCRF(1,r5) beqlr bf 29,9f stw r4,0(r6) addi r6,r6,4 9: bf 30,10f sth r4,0(r6) addi r6,r6,2 10: bflr 31 stb r4,0(r6) blr EXPORT_SYMBOL(memset) EXPORT_SYMBOL_KASAN(memset) _GLOBAL_TOC_KASAN(memmove) cmplw 0,r3,r4 bgt backwards_memcpy b memcpy _GLOBAL(backwards_memcpy) rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ add r6,r3,r5 add r4,r4,r5 beq 2f andi. r0,r6,3 mtctr r7 bne 5f .balign 16 1: lwz r7,-4(r4) lwzu r8,-8(r4) stw r7,-4(r6) stwu r8,-8(r6) bdnz 1b andi. r5,r5,7 2: cmplwi 0,r5,4 blt 3f lwzu r0,-4(r4) subi r5,r5,4 stwu r0,-4(r6) 3: cmpwi 0,r5,0 beqlr mtctr r5 4: lbzu r0,-1(r4) stbu r0,-1(r6) bdnz 4b blr 5: mtctr r0 6: lbzu r7,-1(r4) stbu r7,-1(r6) bdnz 6b subf r5,r0,r5 rlwinm. r7,r5,32-3,3,31 beq 2b mtctr r7 b 1b EXPORT_SYMBOL(memmove) EXPORT_SYMBOL_KASAN(memmove)
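__memset16/32/64 above fall through into one another because each only needs to widen the fill pattern before sharing the common store loop, and memset itself replicates its byte the same way. A C sketch of the widening steps (rlwimi doubles 16 to 32 bits, rldimi doubles 32 to 64; the helper names are illustrative):

#include <stdint.h>

static uint64_t widen16(uint16_t v)	/* __memset16's rlwimi + rldimi */
{
	uint32_t w = (uint32_t)v | ((uint32_t)v << 16);
	return (uint64_t)w | ((uint64_t)w << 32);
}

static uint64_t widen8(uint8_t c)	/* memset's 8 -> 16 -> 32 -> 64 */
{
	uint64_t v = c;

	v |= v << 8;
	v |= v << 16;
	v |= v << 32;
	return v;
}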
aixcc-public/challenge-001-exemplar-source
3,787
arch/powerpc/lib/ldstfp.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Floating-point, VMX/Altivec and VSX loads and stores * for use in instruction emulation. * * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/ppc-opcode.h> #include <asm/reg.h> #include <asm/asm-offsets.h> #include <asm/asm-compat.h> #include <linux/errno.h> #define STKFRM (PPC_MIN_STKFRM + 16) /* Get the contents of frN into *p; N is in r3 and p is in r4. */ _GLOBAL(get_fpr) mflr r0 mfmsr r6 ori r7, r6, MSR_FP MTMSRD(r7) isync rlwinm r3,r3,3,0xf8 bcl 20,31,1f reg = 0 .rept 32 stfd reg, 0(r4) b 2f reg = reg + 1 .endr 1: mflr r5 add r5,r3,r5 mtctr r5 mtlr r0 bctr 2: MTMSRD(r6) isync blr /* Put the contents of *p into frN; N is in r3 and p is in r4. */ _GLOBAL(put_fpr) mflr r0 mfmsr r6 ori r7, r6, MSR_FP MTMSRD(r7) isync rlwinm r3,r3,3,0xf8 bcl 20,31,1f reg = 0 .rept 32 lfd reg, 0(r4) b 2f reg = reg + 1 .endr 1: mflr r5 add r5,r3,r5 mtctr r5 mtlr r0 bctr 2: MTMSRD(r6) isync blr #ifdef CONFIG_ALTIVEC /* Get the contents of vrN into *p; N is in r3 and p is in r4. */ _GLOBAL(get_vr) mflr r0 mfmsr r6 oris r7, r6, MSR_VEC@h MTMSRD(r7) isync rlwinm r3,r3,3,0xf8 bcl 20,31,1f reg = 0 .rept 32 stvx reg, 0, r4 b 2f reg = reg + 1 .endr 1: mflr r5 add r5,r3,r5 mtctr r5 mtlr r0 bctr 2: MTMSRD(r6) isync blr /* Put the contents of *p into vrN; N is in r3 and p is in r4. */ _GLOBAL(put_vr) mflr r0 mfmsr r6 oris r7, r6, MSR_VEC@h MTMSRD(r7) isync rlwinm r3,r3,3,0xf8 bcl 20,31,1f reg = 0 .rept 32 lvx reg, 0, r4 b 2f reg = reg + 1 .endr 1: mflr r5 add r5,r3,r5 mtctr r5 mtlr r0 bctr 2: MTMSRD(r6) isync blr #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX /* Get the contents of vsN into vs0; N is in r3. */ _GLOBAL(get_vsr) mflr r0 rlwinm r3,r3,3,0x1f8 bcl 20,31,1f blr /* vs0 is already in vs0 */ nop reg = 1 .rept 63 XXLOR(0,reg,reg) blr reg = reg + 1 .endr 1: mflr r5 add r5,r3,r5 mtctr r5 mtlr r0 bctr /* Put the contents of vs0 into vsN; N is in r3. */ _GLOBAL(put_vsr) mflr r0 rlwinm r3,r3,3,0x1f8 bcl 20,31,1f blr /* v0 is already in v0 */ nop reg = 1 .rept 63 XXLOR(reg,0,0) blr reg = reg + 1 .endr 1: mflr r5 add r5,r3,r5 mtctr r5 mtlr r0 bctr /* Load VSX reg N from vector doubleword *p. N is in r3, p in r4. */ _GLOBAL(load_vsrn) PPC_STLU r1,-STKFRM(r1) mflr r0 PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) mfmsr r6 oris r7,r6,MSR_VSX@h cmpwi cr7,r3,0 li r8,STKFRM-16 MTMSRD(r7) isync beq cr7,1f STXVD2X(0,R1,R8) 1: LXVD2X(0,R0,R4) #ifdef __LITTLE_ENDIAN__ XXSWAPD(0,0) #endif beq cr7,4f bl put_vsr LXVD2X(0,R1,R8) 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) mtlr r0 MTMSRD(r6) isync addi r1,r1,STKFRM blr /* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */ _GLOBAL(store_vsrn) PPC_STLU r1,-STKFRM(r1) mflr r0 PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) mfmsr r6 oris r7,r6,MSR_VSX@h li r8,STKFRM-16 MTMSRD(r7) isync STXVD2X(0,R1,R8) bl get_vsr #ifdef __LITTLE_ENDIAN__ XXSWAPD(0,0) #endif STXVD2X(0,R0,R4) LXVD2X(0,R1,R8) PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) mtlr r0 MTMSRD(r6) isync mr r3,r9 addi r1,r1,STKFRM blr #endif /* CONFIG_VSX */ /* Convert single-precision to double, without disturbing FPRs. */ /* conv_sp_to_dp(float *sp, double *dp) */ _GLOBAL(conv_sp_to_dp) mfmsr r6 ori r7, r6, MSR_FP MTMSRD(r7) isync stfd fr0, -16(r1) lfs fr0, 0(r3) stfd fr0, 0(r4) lfd fr0, -16(r1) MTMSRD(r6) isync blr /* Convert single-precision to double, without disturbing FPRs. 
*/ /* conv_dp_to_sp(double *dp, float *sp) */ _GLOBAL(conv_dp_to_sp) mfmsr r6 ori r7, r6, MSR_FP MTMSRD(r7) isync stfd fr0, -16(r1) lfd fr0, 0(r3) stfs fr0, 0(r4) lfd fr0, -16(r1) MTMSRD(r6) isync blr
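get_fpr() and the other accessors in ldstfp.S need a 32-entry stub table because the register number in an instruction like stfd frN must be an immediate: the code scales N by the size of one stub (rlwinm r3,r3,3), adds that to the table address discovered with bcl, and branches into the table via mtctr/bctr. In C the register file would simply be indexable; a sketch of the equivalent, with fr[] as an illustrative stand-in:

static double fr[32];	/* stand-in for the 32 FPRs; illustrative only */

static void get_fpr_sketch(unsigned int n, double *p)
{
	*p = fr[n & 31];	/* replaces the whole computed-branch table */
}

The MSR_FP enable/restore around the access has no C analogue; it is what makes the FPRs usable in kernel context at all.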
aixcc-public/challenge-001-exemplar-source
1,078
arch/powerpc/lib/quad.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Quadword loads and stores
 * for use in instruction emulation.
 *
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <linux/errno.h>

/* do_lq(unsigned long ea, unsigned long *regs) */
_GLOBAL(do_lq)
1:	lq	r6, 0(r3)
	std	r6, 0(r4)
	std	r7, 8(r4)
	li	r3, 0
	blr
2:	li	r3, -EFAULT
	blr
	EX_TABLE(1b, 2b)

/* do_stq(unsigned long ea, unsigned long val0, unsigned long val1) */
_GLOBAL(do_stq)
1:	stq	r4, 0(r3)
	li	r3, 0
	blr
2:	li	r3, -EFAULT
	blr
	EX_TABLE(1b, 2b)

/* do_lqarx(unsigned long ea, unsigned long *regs) */
_GLOBAL(do_lqarx)
1:	PPC_LQARX(6, 0, 3, 0)
	std	r6, 0(r4)
	std	r7, 8(r4)
	li	r3, 0
	blr
2:	li	r3, -EFAULT
	blr
	EX_TABLE(1b, 2b)

/* do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
	    unsigned int *crp) */
_GLOBAL(do_stqcx)
1:	PPC_STQCX(4, 0, 3)
	mfcr	r5
	stw	r5, 0(r6)
	li	r3, 0
	blr
2:	li	r3, -EFAULT
	blr
	EX_TABLE(1b, 2b)
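Each helper in quad.S wraps exactly one faultable instruction plus an EX_TABLE entry that reroutes a fault on it to the stub returning -EFAULT. A C restatement of do_lq()'s contract follows; this is illustration only, since C has no exception-table fixup and two separate loads cannot reproduce lq's single-copy atomicity.

#include <stdint.h>

/* Returns 0 and fills regs[0..1] with the 16 bytes at ea; the real
 * routine instead returns -EFAULT if the lq faults. */
static int do_lq_sketch(const uint64_t *ea, uint64_t regs[2])
{
	regs[0] = ea[0];
	regs[1] = ea[1];
	return 0;
}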
aixcc-public/challenge-001-exemplar-source
12,150
arch/powerpc/lib/copyuser_power7.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * * Copyright (C) IBM Corporation, 2011 * * Author: Anton Blanchard <anton@au.ibm.com> */ #include <asm/ppc_asm.h> #ifndef SELFTEST_CASE /* 0 == don't use VMX, 1 == use VMX */ #define SELFTEST_CASE 0 #endif #ifdef __BIG_ENDIAN__ #define LVS(VRT,RA,RB) lvsl VRT,RA,RB #define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC #else #define LVS(VRT,RA,RB) lvsr VRT,RA,RB #define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC #endif .macro err1 100: EX_TABLE(100b,.Ldo_err1) .endm .macro err2 200: EX_TABLE(200b,.Ldo_err2) .endm #ifdef CONFIG_ALTIVEC .macro err3 300: EX_TABLE(300b,.Ldo_err3) .endm .macro err4 400: EX_TABLE(400b,.Ldo_err4) .endm .Ldo_err4: ld r16,STK_REG(R16)(r1) ld r15,STK_REG(R15)(r1) ld r14,STK_REG(R14)(r1) .Ldo_err3: bl exit_vmx_usercopy ld r0,STACKFRAMESIZE+16(r1) mtlr r0 b .Lexit #endif /* CONFIG_ALTIVEC */ .Ldo_err2: ld r22,STK_REG(R22)(r1) ld r21,STK_REG(R21)(r1) ld r20,STK_REG(R20)(r1) ld r19,STK_REG(R19)(r1) ld r18,STK_REG(R18)(r1) ld r17,STK_REG(R17)(r1) ld r16,STK_REG(R16)(r1) ld r15,STK_REG(R15)(r1) ld r14,STK_REG(R14)(r1) .Lexit: addi r1,r1,STACKFRAMESIZE .Ldo_err1: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) ld r4,-STACKFRAMESIZE+STK_REG(R30)(r1) ld r5,-STACKFRAMESIZE+STK_REG(R29)(r1) b __copy_tofrom_user_base _GLOBAL(__copy_tofrom_user_power7) cmpldi r5,16 cmpldi cr1,r5,3328 std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) blt .Lshort_copy #ifdef CONFIG_ALTIVEC test_feature = SELFTEST_CASE BEGIN_FTR_SECTION bgt cr1,.Lvmx_copy END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif .Lnonvmx_copy: /* Get the source 8B aligned */ neg r6,r4 mtocrf 0x01,r6 clrldi r6,r6,(64-3) bf cr7*4+3,1f err1; lbz r0,0(r4) addi r4,r4,1 err1; stb r0,0(r3) addi r3,r3,1 1: bf cr7*4+2,2f err1; lhz r0,0(r4) addi r4,r4,2 err1; sth r0,0(r3) addi r3,r3,2 2: bf cr7*4+1,3f err1; lwz r0,0(r4) addi r4,r4,4 err1; stw r0,0(r3) addi r3,r3,4 3: sub r5,r5,r6 cmpldi r5,128 blt 5f mflr r0 stdu r1,-STACKFRAMESIZE(r1) std r14,STK_REG(R14)(r1) std r15,STK_REG(R15)(r1) std r16,STK_REG(R16)(r1) std r17,STK_REG(R17)(r1) std r18,STK_REG(R18)(r1) std r19,STK_REG(R19)(r1) std r20,STK_REG(R20)(r1) std r21,STK_REG(R21)(r1) std r22,STK_REG(R22)(r1) std r0,STACKFRAMESIZE+16(r1) srdi r6,r5,7 mtctr r6 /* Now do cacheline (128B) sized loads and stores. 
*/ .align 5 4: err2; ld r0,0(r4) err2; ld r6,8(r4) err2; ld r7,16(r4) err2; ld r8,24(r4) err2; ld r9,32(r4) err2; ld r10,40(r4) err2; ld r11,48(r4) err2; ld r12,56(r4) err2; ld r14,64(r4) err2; ld r15,72(r4) err2; ld r16,80(r4) err2; ld r17,88(r4) err2; ld r18,96(r4) err2; ld r19,104(r4) err2; ld r20,112(r4) err2; ld r21,120(r4) addi r4,r4,128 err2; std r0,0(r3) err2; std r6,8(r3) err2; std r7,16(r3) err2; std r8,24(r3) err2; std r9,32(r3) err2; std r10,40(r3) err2; std r11,48(r3) err2; std r12,56(r3) err2; std r14,64(r3) err2; std r15,72(r3) err2; std r16,80(r3) err2; std r17,88(r3) err2; std r18,96(r3) err2; std r19,104(r3) err2; std r20,112(r3) err2; std r21,120(r3) addi r3,r3,128 bdnz 4b clrldi r5,r5,(64-7) ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) ld r17,STK_REG(R17)(r1) ld r18,STK_REG(R18)(r1) ld r19,STK_REG(R19)(r1) ld r20,STK_REG(R20)(r1) ld r21,STK_REG(R21)(r1) ld r22,STK_REG(R22)(r1) addi r1,r1,STACKFRAMESIZE /* Up to 127B to go */ 5: srdi r6,r5,4 mtocrf 0x01,r6 6: bf cr7*4+1,7f err1; ld r0,0(r4) err1; ld r6,8(r4) err1; ld r7,16(r4) err1; ld r8,24(r4) err1; ld r9,32(r4) err1; ld r10,40(r4) err1; ld r11,48(r4) err1; ld r12,56(r4) addi r4,r4,64 err1; std r0,0(r3) err1; std r6,8(r3) err1; std r7,16(r3) err1; std r8,24(r3) err1; std r9,32(r3) err1; std r10,40(r3) err1; std r11,48(r3) err1; std r12,56(r3) addi r3,r3,64 /* Up to 63B to go */ 7: bf cr7*4+2,8f err1; ld r0,0(r4) err1; ld r6,8(r4) err1; ld r7,16(r4) err1; ld r8,24(r4) addi r4,r4,32 err1; std r0,0(r3) err1; std r6,8(r3) err1; std r7,16(r3) err1; std r8,24(r3) addi r3,r3,32 /* Up to 31B to go */ 8: bf cr7*4+3,9f err1; ld r0,0(r4) err1; ld r6,8(r4) addi r4,r4,16 err1; std r0,0(r3) err1; std r6,8(r3) addi r3,r3,16 9: clrldi r5,r5,(64-4) /* Up to 15B to go */ .Lshort_copy: mtocrf 0x01,r5 bf cr7*4+0,12f err1; lwz r0,0(r4) /* Less chance of a reject with word ops */ err1; lwz r6,4(r4) addi r4,r4,8 err1; stw r0,0(r3) err1; stw r6,4(r3) addi r3,r3,8 12: bf cr7*4+1,13f err1; lwz r0,0(r4) addi r4,r4,4 err1; stw r0,0(r3) addi r3,r3,4 13: bf cr7*4+2,14f err1; lhz r0,0(r4) addi r4,r4,2 err1; sth r0,0(r3) addi r3,r3,2 14: bf cr7*4+3,15f err1; lbz r0,0(r4) err1; stb r0,0(r3) 15: li r3,0 blr .Lunwind_stack_nonvmx_copy: addi r1,r1,STACKFRAMESIZE b .Lnonvmx_copy .Lvmx_copy: #ifdef CONFIG_ALTIVEC mflr r0 std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl enter_vmx_usercopy cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) ld r3,STK_REG(R31)(r1) ld r4,STK_REG(R30)(r1) ld r5,STK_REG(R29)(r1) mtlr r0 /* * We prefetch both the source and destination using enhanced touch * instructions. We use a stream ID of 0 for the load side and * 1 for the store side. */ clrrdi r6,r4,7 clrrdi r9,r3,7 ori r9,r9,1 /* stream=1 */ srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */ cmpldi r7,0x3FF ble 1f li r7,0x3FF 1: lis r0,0x0E00 /* depth=7 */ sldi r7,r7,7 or r7,r7,r0 ori r10,r7,1 /* stream=1 */ lis r8,0x8000 /* GO=1 */ clrldi r8,r8,32 /* setup read stream 0 */ dcbt 0,r6,0b01000 /* addr from */ dcbt 0,r7,0b01010 /* length and depth from */ /* setup write stream 1 */ dcbtst 0,r9,0b01000 /* addr to */ dcbtst 0,r10,0b01010 /* length and depth to */ eieio dcbt 0,r8,0b01010 /* all streams GO */ beq cr1,.Lunwind_stack_nonvmx_copy /* * If source and destination are not relatively aligned we use a * slower permute loop. */ xor r6,r4,r3 rldicl. 
r6,r6,0,(64-4) bne .Lvmx_unaligned_copy /* Get the destination 16B aligned */ neg r6,r3 mtocrf 0x01,r6 clrldi r6,r6,(64-4) bf cr7*4+3,1f err3; lbz r0,0(r4) addi r4,r4,1 err3; stb r0,0(r3) addi r3,r3,1 1: bf cr7*4+2,2f err3; lhz r0,0(r4) addi r4,r4,2 err3; sth r0,0(r3) addi r3,r3,2 2: bf cr7*4+1,3f err3; lwz r0,0(r4) addi r4,r4,4 err3; stw r0,0(r3) addi r3,r3,4 3: bf cr7*4+0,4f err3; ld r0,0(r4) addi r4,r4,8 err3; std r0,0(r3) addi r3,r3,8 4: sub r5,r5,r6 /* Get the desination 128B aligned */ neg r6,r3 srdi r7,r6,4 mtocrf 0x01,r7 clrldi r6,r6,(64-7) li r9,16 li r10,32 li r11,48 bf cr7*4+3,5f err3; lvx v1,0,r4 addi r4,r4,16 err3; stvx v1,0,r3 addi r3,r3,16 5: bf cr7*4+2,6f err3; lvx v1,0,r4 err3; lvx v0,r4,r9 addi r4,r4,32 err3; stvx v1,0,r3 err3; stvx v0,r3,r9 addi r3,r3,32 6: bf cr7*4+1,7f err3; lvx v3,0,r4 err3; lvx v2,r4,r9 err3; lvx v1,r4,r10 err3; lvx v0,r4,r11 addi r4,r4,64 err3; stvx v3,0,r3 err3; stvx v2,r3,r9 err3; stvx v1,r3,r10 err3; stvx v0,r3,r11 addi r3,r3,64 7: sub r5,r5,r6 srdi r6,r5,7 std r14,STK_REG(R14)(r1) std r15,STK_REG(R15)(r1) std r16,STK_REG(R16)(r1) li r12,64 li r14,80 li r15,96 li r16,112 mtctr r6 /* * Now do cacheline sized loads and stores. By this stage the * cacheline stores are also cacheline aligned. */ .align 5 8: err4; lvx v7,0,r4 err4; lvx v6,r4,r9 err4; lvx v5,r4,r10 err4; lvx v4,r4,r11 err4; lvx v3,r4,r12 err4; lvx v2,r4,r14 err4; lvx v1,r4,r15 err4; lvx v0,r4,r16 addi r4,r4,128 err4; stvx v7,0,r3 err4; stvx v6,r3,r9 err4; stvx v5,r3,r10 err4; stvx v4,r3,r11 err4; stvx v3,r3,r12 err4; stvx v2,r3,r14 err4; stvx v1,r3,r15 err4; stvx v0,r3,r16 addi r3,r3,128 bdnz 8b ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) /* Up to 127B to go */ clrldi r5,r5,(64-7) srdi r6,r5,4 mtocrf 0x01,r6 bf cr7*4+1,9f err3; lvx v3,0,r4 err3; lvx v2,r4,r9 err3; lvx v1,r4,r10 err3; lvx v0,r4,r11 addi r4,r4,64 err3; stvx v3,0,r3 err3; stvx v2,r3,r9 err3; stvx v1,r3,r10 err3; stvx v0,r3,r11 addi r3,r3,64 9: bf cr7*4+2,10f err3; lvx v1,0,r4 err3; lvx v0,r4,r9 addi r4,r4,32 err3; stvx v1,0,r3 err3; stvx v0,r3,r9 addi r3,r3,32 10: bf cr7*4+3,11f err3; lvx v1,0,r4 addi r4,r4,16 err3; stvx v1,0,r3 addi r3,r3,16 /* Up to 15B to go */ 11: clrldi r5,r5,(64-4) mtocrf 0x01,r5 bf cr7*4+0,12f err3; ld r0,0(r4) addi r4,r4,8 err3; std r0,0(r3) addi r3,r3,8 12: bf cr7*4+1,13f err3; lwz r0,0(r4) addi r4,r4,4 err3; stw r0,0(r3) addi r3,r3,4 13: bf cr7*4+2,14f err3; lhz r0,0(r4) addi r4,r4,2 err3; sth r0,0(r3) addi r3,r3,2 14: bf cr7*4+3,15f err3; lbz r0,0(r4) err3; stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE b exit_vmx_usercopy /* tail call optimise */ .Lvmx_unaligned_copy: /* Get the destination 16B aligned */ neg r6,r3 mtocrf 0x01,r6 clrldi r6,r6,(64-4) bf cr7*4+3,1f err3; lbz r0,0(r4) addi r4,r4,1 err3; stb r0,0(r3) addi r3,r3,1 1: bf cr7*4+2,2f err3; lhz r0,0(r4) addi r4,r4,2 err3; sth r0,0(r3) addi r3,r3,2 2: bf cr7*4+1,3f err3; lwz r0,0(r4) addi r4,r4,4 err3; stw r0,0(r3) addi r3,r3,4 3: bf cr7*4+0,4f err3; lwz r0,0(r4) /* Less chance of a reject with word ops */ err3; lwz r7,4(r4) addi r4,r4,8 err3; stw r0,0(r3) err3; stw r7,4(r3) addi r3,r3,8 4: sub r5,r5,r6 /* Get the desination 128B aligned */ neg r6,r3 srdi r7,r6,4 mtocrf 0x01,r7 clrldi r6,r6,(64-7) li r9,16 li r10,32 li r11,48 LVS(v16,0,r4) /* Setup permute control vector */ err3; lvx v0,0,r4 addi r4,r4,16 bf cr7*4+3,5f err3; lvx v1,0,r4 VPERM(v8,v0,v1,v16) addi r4,r4,16 err3; stvx v8,0,r3 addi r3,r3,16 vor v0,v1,v1 5: bf cr7*4+2,6f err3; lvx v1,0,r4 VPERM(v8,v0,v1,v16) err3; lvx v0,r4,r9 VPERM(v9,v1,v0,v16) addi 
r4,r4,32 err3; stvx v8,0,r3 err3; stvx v9,r3,r9 addi r3,r3,32 6: bf cr7*4+1,7f err3; lvx v3,0,r4 VPERM(v8,v0,v3,v16) err3; lvx v2,r4,r9 VPERM(v9,v3,v2,v16) err3; lvx v1,r4,r10 VPERM(v10,v2,v1,v16) err3; lvx v0,r4,r11 VPERM(v11,v1,v0,v16) addi r4,r4,64 err3; stvx v8,0,r3 err3; stvx v9,r3,r9 err3; stvx v10,r3,r10 err3; stvx v11,r3,r11 addi r3,r3,64 7: sub r5,r5,r6 srdi r6,r5,7 std r14,STK_REG(R14)(r1) std r15,STK_REG(R15)(r1) std r16,STK_REG(R16)(r1) li r12,64 li r14,80 li r15,96 li r16,112 mtctr r6 /* * Now do cacheline sized loads and stores. By this stage the * cacheline stores are also cacheline aligned. */ .align 5 8: err4; lvx v7,0,r4 VPERM(v8,v0,v7,v16) err4; lvx v6,r4,r9 VPERM(v9,v7,v6,v16) err4; lvx v5,r4,r10 VPERM(v10,v6,v5,v16) err4; lvx v4,r4,r11 VPERM(v11,v5,v4,v16) err4; lvx v3,r4,r12 VPERM(v12,v4,v3,v16) err4; lvx v2,r4,r14 VPERM(v13,v3,v2,v16) err4; lvx v1,r4,r15 VPERM(v14,v2,v1,v16) err4; lvx v0,r4,r16 VPERM(v15,v1,v0,v16) addi r4,r4,128 err4; stvx v8,0,r3 err4; stvx v9,r3,r9 err4; stvx v10,r3,r10 err4; stvx v11,r3,r11 err4; stvx v12,r3,r12 err4; stvx v13,r3,r14 err4; stvx v14,r3,r15 err4; stvx v15,r3,r16 addi r3,r3,128 bdnz 8b ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) /* Up to 127B to go */ clrldi r5,r5,(64-7) srdi r6,r5,4 mtocrf 0x01,r6 bf cr7*4+1,9f err3; lvx v3,0,r4 VPERM(v8,v0,v3,v16) err3; lvx v2,r4,r9 VPERM(v9,v3,v2,v16) err3; lvx v1,r4,r10 VPERM(v10,v2,v1,v16) err3; lvx v0,r4,r11 VPERM(v11,v1,v0,v16) addi r4,r4,64 err3; stvx v8,0,r3 err3; stvx v9,r3,r9 err3; stvx v10,r3,r10 err3; stvx v11,r3,r11 addi r3,r3,64 9: bf cr7*4+2,10f err3; lvx v1,0,r4 VPERM(v8,v0,v1,v16) err3; lvx v0,r4,r9 VPERM(v9,v1,v0,v16) addi r4,r4,32 err3; stvx v8,0,r3 err3; stvx v9,r3,r9 addi r3,r3,32 10: bf cr7*4+3,11f err3; lvx v1,0,r4 VPERM(v8,v0,v1,v16) addi r4,r4,16 err3; stvx v8,0,r3 addi r3,r3,16 /* Up to 15B to go */ 11: clrldi r5,r5,(64-4) addi r4,r4,-16 /* Unwind the +16 load offset */ mtocrf 0x01,r5 bf cr7*4+0,12f err3; lwz r0,0(r4) /* Less chance of a reject with word ops */ err3; lwz r6,4(r4) addi r4,r4,8 err3; stw r0,0(r3) err3; stw r6,4(r3) addi r3,r3,8 12: bf cr7*4+1,13f err3; lwz r0,0(r4) addi r4,r4,4 err3; stw r0,0(r3) addi r3,r3,4 13: bf cr7*4+2,14f err3; lhz r0,0(r4) addi r4,r4,2 err3; sth r0,0(r3) addi r3,r3,2 14: bf cr7*4+3,15f err3; lbz r0,0(r4) err3; stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE b exit_vmx_usercopy /* tail call optimise */ #endif /* CONFIG_ALTIVEC */
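A hedged C sketch of just the entry dispatch of __copy_tofrom_user_power7 above: the cmpldi values 16 and 3328 are the cutoffs below which alignment work, and the VMX enter/exit cost respectively, do not pay off. Names are placeholders, not kernel APIs.

#include <stddef.h>

enum path { PATH_SHORT, PATH_SCALAR_128B, PATH_VMX };

static enum path pick_path(size_t n, int has_altivec)
{
	if (n < 16)
		return PATH_SHORT;		/* blt .Lshort_copy */
	if (has_altivec && n > 3328)
		return PATH_VMX;		/* bgt cr1,.Lvmx_copy */
	return PATH_SCALAR_128B;		/* .Lnonvmx_copy */
}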
aixcc-public/challenge-001-exemplar-source
1,213
arch/powerpc/lib/string.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * String handling functions for PowerPC.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/cache.h>

	.text

/* This clears out any unused part of the destination buffer,
   just as the libc version does.  -- paulus */
_GLOBAL(strncpy)
	PPC_LCMPI 0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
	.balign IFETCH_ALIGN_BYTES
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	bnelr			/* if we didn't hit a null char, we're done */
	mfctr	r5
	PPC_LCMPI 0,r5,0	/* any space left in destination buffer? */
	beqlr			/* we know r0 == 0 here */
2:	stbu	r0,1(r6)	/* clear it out if so */
	bdnz	2b
	blr
EXPORT_SYMBOL(strncpy)

_GLOBAL(strncmp)
	PPC_LCMPI 0,r5,0
	beq-	2f
	mtctr	r5
	addi	r5,r3,-1
	addi	r4,r4,-1
	.balign IFETCH_ALIGN_BYTES
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	beqlr	1
	bdnzt	eq,1b
	blr
2:	li	r3,0
	blr
EXPORT_SYMBOL(strncmp)

_GLOBAL(memchr)
	PPC_LCMPI 0,r5,0
	beq-	2f
	mtctr	r5
	addi	r3,r3,-1
	.balign IFETCH_ALIGN_BYTES
1:	lbzu	r0,1(r3)
	cmpw	0,r0,r4
	bdnzf	2,1b
	beqlr
2:	li	r3,0
	blr
EXPORT_SYMBOL(memchr)
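The padding behaviour that the strncpy comment above promises, restated as a C sketch; the kernel routine is the assembly above, this only spells out its contract.

#include <stddef.h>

static char *strncpy_contract(char *dst, const char *src, size_t n)
{
	size_t i = 0;

	while (i < n && src[i] != '\0') {	/* copy up to the NUL */
		dst[i] = src[i];
		i++;
	}
	while (i < n)				/* then zero the remainder */
		dst[i++] = '\0';
	return dst;
}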
aixcc-public/challenge-001-exemplar-source
1,866
arch/powerpc/lib/hweight_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * * Copyright (C) IBM Corporation, 2010 * * Author: Anton Blanchard <anton@au.ibm.com> */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/export.h> #include <asm/feature-fixups.h> /* Note: This code relies on -mminimal-toc */ _GLOBAL(__arch_hweight8) BEGIN_FTR_SECTION b __sw_hweight8 nop nop FTR_SECTION_ELSE PPC_POPCNTB(R3,R3) clrldi r3,r3,64-8 blr ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) EXPORT_SYMBOL(__arch_hweight8) _GLOBAL(__arch_hweight16) BEGIN_FTR_SECTION b __sw_hweight16 nop nop nop nop FTR_SECTION_ELSE BEGIN_FTR_SECTION_NESTED(50) PPC_POPCNTB(R3,R3) srdi r4,r3,8 add r3,r4,r3 clrldi r3,r3,64-8 blr FTR_SECTION_ELSE_NESTED(50) clrlwi r3,r3,16 PPC_POPCNTW(R3,R3) clrldi r3,r3,64-8 blr ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50) ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) EXPORT_SYMBOL(__arch_hweight16) _GLOBAL(__arch_hweight32) BEGIN_FTR_SECTION b __sw_hweight32 nop nop nop nop nop nop FTR_SECTION_ELSE BEGIN_FTR_SECTION_NESTED(51) PPC_POPCNTB(R3,R3) srdi r4,r3,16 add r3,r4,r3 srdi r4,r3,8 add r3,r4,r3 clrldi r3,r3,64-8 blr FTR_SECTION_ELSE_NESTED(51) PPC_POPCNTW(R3,R3) clrldi r3,r3,64-8 blr ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51) ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) EXPORT_SYMBOL(__arch_hweight32) _GLOBAL(__arch_hweight64) BEGIN_FTR_SECTION b __sw_hweight64 nop nop nop nop nop nop nop nop FTR_SECTION_ELSE BEGIN_FTR_SECTION_NESTED(52) PPC_POPCNTB(R3,R3) srdi r4,r3,32 add r3,r4,r3 srdi r4,r3,16 add r3,r4,r3 srdi r4,r3,8 add r3,r4,r3 clrldi r3,r3,64-8 blr FTR_SECTION_ELSE_NESTED(52) PPC_POPCNTD(R3,R3) clrldi r3,r3,64-8 blr ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52) ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) EXPORT_SYMBOL(__arch_hweight64)
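The POPCNTB paths above fold eight per-byte counts into a single count by repeated shift-and-add, exactly the srdi/add pairs in the asm. A C sketch, with popcntb emulated by portable bit tricks (the emulation helper is not a kernel API):

#include <stdint.h>

static uint64_t popcntb_emul(uint64_t x)	/* per-byte popcount */
{
	x -= (x >> 1) & 0x5555555555555555ull;
	x = (x & 0x3333333333333333ull) + ((x >> 2) & 0x3333333333333333ull);
	return (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0full;
}

static unsigned int hweight64_sketch(uint64_t w)
{
	uint64_t x = popcntb_emul(w);

	x += x >> 32;			/* srdi r4,r3,32; add */
	x += x >> 16;			/* srdi r4,r3,16; add */
	x += x >> 8;			/* srdi r4,r3,8;  add */
	return (unsigned int)(x & 0xff);	/* clrldi r3,r3,64-8 */
}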
aixcc-public/challenge-001-exemplar-source
11,151
arch/powerpc/lib/copyuser_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2002 Paul Mackerras, IBM Corp. */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/export.h> #include <asm/asm-compat.h> #include <asm/feature-fixups.h> #ifndef SELFTEST_CASE /* 0 == most CPUs, 1 == POWER6, 2 == Cell */ #define SELFTEST_CASE 0 #endif #ifdef __BIG_ENDIAN__ #define sLd sld /* Shift towards low-numbered address. */ #define sHd srd /* Shift towards high-numbered address. */ #else #define sLd srd /* Shift towards low-numbered address. */ #define sHd sld /* Shift towards high-numbered address. */ #endif /* * These macros are used to generate exception table entries. * The exception handlers below use the original arguments * (stored on the stack) and the point where we're up to in * the destination buffer, i.e. the address of the first * unmodified byte. Generally r3 points into the destination * buffer, but the first unmodified byte is at a variable * offset from r3. In the code below, the symbol r3_offset * is set to indicate the current offset at each point in * the code. This offset is then used as a negative offset * from the exception handler code, and those instructions * before the exception handlers are addi instructions that * adjust r3 to point to the correct place. */ .macro lex /* exception handler for load */ 100: EX_TABLE(100b, .Lld_exc - r3_offset) .endm .macro stex /* exception handler for store */ 100: EX_TABLE(100b, .Lst_exc - r3_offset) .endm .align 7 _GLOBAL_TOC(__copy_tofrom_user) #ifdef CONFIG_PPC_BOOK3S_64 BEGIN_FTR_SECTION nop FTR_SECTION_ELSE b __copy_tofrom_user_power7 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) #endif _GLOBAL(__copy_tofrom_user_base) /* first check for a 4kB copy on a 4kB boundary */ cmpldi cr1,r5,16 cmpdi cr6,r5,4096 or r0,r3,r4 neg r6,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */ andi. r0,r0,4095 std r3,-24(r1) crand cr0*4+2,cr0*4+2,cr6*4+2 std r4,-16(r1) std r5,-8(r1) dcbt 0,r4 beq .Lcopy_page_4K andi. r6,r6,7 PPC_MTOCRF(0x01,r5) blt cr1,.Lshort_copy /* Below we want to nop out the bne if we're on a CPU that has the * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit * cleared. * At the time of writing the only CPU that has this combination of bits * set is Power6. */ test_feature = (SELFTEST_CASE == 1) BEGIN_FTR_SECTION nop FTR_SECTION_ELSE bne .Ldst_unaligned ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \ CPU_FTR_UNALIGNED_LD_STD) .Ldst_aligned: addi r3,r3,-16 r3_offset = 16 test_feature = (SELFTEST_CASE == 0) BEGIN_FTR_SECTION andi. r0,r4,7 bne .Lsrc_unaligned END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) blt cr1,.Ldo_tail /* if < 16 bytes to copy */ srdi r0,r5,5 cmpdi cr1,r0,0 lex; ld r7,0(r4) lex; ld r6,8(r4) addi r4,r4,16 mtctr r0 andi. r0,r5,0x10 beq 22f addi r3,r3,16 r3_offset = 0 addi r4,r4,-16 mr r9,r7 mr r8,r6 beq cr1,72f 21: lex; ld r7,16(r4) lex; ld r6,24(r4) addi r4,r4,32 stex; std r9,0(r3) r3_offset = 8 stex; std r8,8(r3) r3_offset = 16 22: lex; ld r9,0(r4) lex; ld r8,8(r4) stex; std r7,16(r3) r3_offset = 24 stex; std r6,24(r3) addi r3,r3,32 r3_offset = 0 bdnz 21b 72: stex; std r9,0(r3) r3_offset = 8 stex; std r8,8(r3) r3_offset = 16 andi. 
r5,r5,0xf beq+ 3f addi r4,r4,16 .Ldo_tail: addi r3,r3,16 r3_offset = 0 bf cr7*4+0,246f lex; ld r9,0(r4) addi r4,r4,8 stex; std r9,0(r3) addi r3,r3,8 246: bf cr7*4+1,1f lex; lwz r9,0(r4) addi r4,r4,4 stex; stw r9,0(r3) addi r3,r3,4 1: bf cr7*4+2,2f lex; lhz r9,0(r4) addi r4,r4,2 stex; sth r9,0(r3) addi r3,r3,2 2: bf cr7*4+3,3f lex; lbz r9,0(r4) stex; stb r9,0(r3) 3: li r3,0 blr .Lsrc_unaligned: r3_offset = 16 srdi r6,r5,3 addi r5,r5,-16 subf r4,r0,r4 srdi r7,r5,4 sldi r10,r0,3 cmpldi cr6,r6,3 andi. r5,r5,7 mtctr r7 subfic r11,r10,64 add r5,r5,r0 bt cr7*4+0,28f lex; ld r9,0(r4) /* 3+2n loads, 2+2n stores */ lex; ld r0,8(r4) sLd r6,r9,r10 lex; ldu r9,16(r4) sHd r7,r0,r11 sLd r8,r0,r10 or r7,r7,r6 blt cr6,79f lex; ld r0,8(r4) b 2f 28: lex; ld r0,0(r4) /* 4+2n loads, 3+2n stores */ lex; ldu r9,8(r4) sLd r8,r0,r10 addi r3,r3,-8 r3_offset = 24 blt cr6,5f lex; ld r0,8(r4) sHd r12,r9,r11 sLd r6,r9,r10 lex; ldu r9,16(r4) or r12,r8,r12 sHd r7,r0,r11 sLd r8,r0,r10 addi r3,r3,16 r3_offset = 8 beq cr6,78f 1: or r7,r7,r6 lex; ld r0,8(r4) stex; std r12,8(r3) r3_offset = 16 2: sHd r12,r9,r11 sLd r6,r9,r10 lex; ldu r9,16(r4) or r12,r8,r12 stex; stdu r7,16(r3) r3_offset = 8 sHd r7,r0,r11 sLd r8,r0,r10 bdnz 1b 78: stex; std r12,8(r3) r3_offset = 16 or r7,r7,r6 79: stex; std r7,16(r3) r3_offset = 24 5: sHd r12,r9,r11 or r12,r8,r12 stex; std r12,24(r3) r3_offset = 32 bne 6f li r3,0 blr 6: cmpwi cr1,r5,8 addi r3,r3,32 r3_offset = 0 sLd r9,r9,r10 ble cr1,7f lex; ld r0,8(r4) sHd r7,r0,r11 or r9,r7,r9 7: bf cr7*4+1,1f #ifdef __BIG_ENDIAN__ rotldi r9,r9,32 #endif stex; stw r9,0(r3) #ifdef __LITTLE_ENDIAN__ rotrdi r9,r9,32 #endif addi r3,r3,4 1: bf cr7*4+2,2f #ifdef __BIG_ENDIAN__ rotldi r9,r9,16 #endif stex; sth r9,0(r3) #ifdef __LITTLE_ENDIAN__ rotrdi r9,r9,16 #endif addi r3,r3,2 2: bf cr7*4+3,3f #ifdef __BIG_ENDIAN__ rotldi r9,r9,8 #endif stex; stb r9,0(r3) #ifdef __LITTLE_ENDIAN__ rotrdi r9,r9,8 #endif 3: li r3,0 blr .Ldst_unaligned: r3_offset = 0 PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */ subf r5,r6,r5 li r7,0 cmpldi cr1,r5,16 bf cr7*4+3,1f 100: EX_TABLE(100b, .Lld_exc_r7) lbz r0,0(r4) 100: EX_TABLE(100b, .Lst_exc_r7) stb r0,0(r3) addi r7,r7,1 1: bf cr7*4+2,2f 100: EX_TABLE(100b, .Lld_exc_r7) lhzx r0,r7,r4 100: EX_TABLE(100b, .Lst_exc_r7) sthx r0,r7,r3 addi r7,r7,2 2: bf cr7*4+1,3f 100: EX_TABLE(100b, .Lld_exc_r7) lwzx r0,r7,r4 100: EX_TABLE(100b, .Lst_exc_r7) stwx r0,r7,r3 3: PPC_MTOCRF(0x01,r5) add r4,r6,r4 add r3,r6,r3 b .Ldst_aligned .Lshort_copy: r3_offset = 0 bf cr7*4+0,1f lex; lwz r0,0(r4) lex; lwz r9,4(r4) addi r4,r4,8 stex; stw r0,0(r3) stex; stw r9,4(r3) addi r3,r3,8 1: bf cr7*4+1,2f lex; lwz r0,0(r4) addi r4,r4,4 stex; stw r0,0(r3) addi r3,r3,4 2: bf cr7*4+2,3f lex; lhz r0,0(r4) addi r4,r4,2 stex; sth r0,0(r3) addi r3,r3,2 3: bf cr7*4+3,4f lex; lbz r0,0(r4) stex; stb r0,0(r3) 4: li r3,0 blr /* * exception handlers follow * we have to return the number of bytes not copied * for an exception on a load, we set the rest of the destination to 0 * Note that the number of bytes of instructions for adjusting r3 needs * to equal the amount of the adjustment, due to the trick of using * .Lld_exc - r3_offset as the handler address. */ .Lld_exc_r7: add r3,r3,r7 b .Lld_exc /* adjust by 24 */ addi r3,r3,8 nop /* adjust by 16 */ addi r3,r3,8 nop /* adjust by 8 */ addi r3,r3,8 nop /* * Here we have had a fault on a load and r3 points to the first * unmodified byte of the destination. We use the original arguments * and r3 to work out how much wasn't copied. 
Since we load some * distance ahead of the stores, we continue copying byte-by-byte until * we hit the load fault again in order to copy as much as possible. */ .Lld_exc: ld r6,-24(r1) ld r4,-16(r1) ld r5,-8(r1) subf r6,r6,r3 add r4,r4,r6 subf r5,r6,r5 /* #bytes left to go */ /* * first see if we can copy any more bytes before hitting another exception */ mtctr r5 r3_offset = 0 100: EX_TABLE(100b, .Ldone) 43: lbz r0,0(r4) addi r4,r4,1 stex; stb r0,0(r3) addi r3,r3,1 bdnz 43b li r3,0 /* huh? all copied successfully this time? */ blr /* * here we have trapped again, amount remaining is in ctr. */ .Ldone: mfctr r3 blr /* * exception handlers for stores: we need to work out how many bytes * weren't copied, and we may need to copy some more. * Note that the number of bytes of instructions for adjusting r3 needs * to equal the amount of the adjustment, due to the trick of using * .Lst_exc - r3_offset as the handler address. */ .Lst_exc_r7: add r3,r3,r7 b .Lst_exc /* adjust by 24 */ addi r3,r3,8 nop /* adjust by 16 */ addi r3,r3,8 nop /* adjust by 8 */ addi r3,r3,4 /* adjust by 4 */ addi r3,r3,4 .Lst_exc: ld r6,-24(r1) /* original destination pointer */ ld r4,-16(r1) /* original source pointer */ ld r5,-8(r1) /* original number of bytes */ add r7,r6,r5 /* * If the destination pointer isn't 8-byte aligned, * we may have got the exception as a result of a * store that overlapped a page boundary, so we may be * able to copy a few more bytes. */ 17: andi. r0,r3,7 beq 19f subf r8,r6,r3 /* #bytes copied */ 100: EX_TABLE(100b,19f) lbzx r0,r8,r4 100: EX_TABLE(100b,19f) stb r0,0(r3) addi r3,r3,1 cmpld r3,r7 blt 17b 19: subf r3,r3,r7 /* #bytes not copied in r3 */ blr /* * Routine to copy a whole page of data, optimized for POWER4. * On POWER4 it is more than 50% faster than the simple loop * above (following the .Ldst_aligned label). 
*/ .macro exc 100: EX_TABLE(100b, .Labort) .endm .Lcopy_page_4K: std r31,-32(1) std r30,-40(1) std r29,-48(1) std r28,-56(1) std r27,-64(1) std r26,-72(1) std r25,-80(1) std r24,-88(1) std r23,-96(1) std r22,-104(1) std r21,-112(1) std r20,-120(1) li r5,4096/32 - 1 addi r3,r3,-8 li r0,5 0: addi r5,r5,-24 mtctr r0 exc; ld r22,640(4) exc; ld r21,512(4) exc; ld r20,384(4) exc; ld r11,256(4) exc; ld r9,128(4) exc; ld r7,0(4) exc; ld r25,648(4) exc; ld r24,520(4) exc; ld r23,392(4) exc; ld r10,264(4) exc; ld r8,136(4) exc; ldu r6,8(4) cmpwi r5,24 1: exc; std r22,648(3) exc; std r21,520(3) exc; std r20,392(3) exc; std r11,264(3) exc; std r9,136(3) exc; std r7,8(3) exc; ld r28,648(4) exc; ld r27,520(4) exc; ld r26,392(4) exc; ld r31,264(4) exc; ld r30,136(4) exc; ld r29,8(4) exc; std r25,656(3) exc; std r24,528(3) exc; std r23,400(3) exc; std r10,272(3) exc; std r8,144(3) exc; std r6,16(3) exc; ld r22,656(4) exc; ld r21,528(4) exc; ld r20,400(4) exc; ld r11,272(4) exc; ld r9,144(4) exc; ld r7,16(4) exc; std r28,664(3) exc; std r27,536(3) exc; std r26,408(3) exc; std r31,280(3) exc; std r30,152(3) exc; stdu r29,24(3) exc; ld r25,664(4) exc; ld r24,536(4) exc; ld r23,408(4) exc; ld r10,280(4) exc; ld r8,152(4) exc; ldu r6,24(4) bdnz 1b exc; std r22,648(3) exc; std r21,520(3) exc; std r20,392(3) exc; std r11,264(3) exc; std r9,136(3) exc; std r7,8(3) addi r4,r4,640 addi r3,r3,648 bge 0b mtctr r5 exc; ld r7,0(4) exc; ld r8,8(4) exc; ldu r9,16(4) 3: exc; ld r10,8(4) exc; std r7,8(3) exc; ld r7,16(4) exc; std r8,16(3) exc; ld r8,24(4) exc; std r9,24(3) exc; ldu r9,32(4) exc; stdu r10,32(3) bdnz 3b 4: exc; ld r10,8(4) exc; std r7,8(3) exc; std r8,16(3) exc; std r9,24(3) exc; std r10,32(3) 9: ld r20,-120(1) ld r21,-112(1) ld r22,-104(1) ld r23,-96(1) ld r24,-88(1) ld r25,-80(1) ld r26,-72(1) ld r27,-64(1) ld r28,-56(1) ld r29,-48(1) ld r30,-40(1) ld r31,-32(1) li r3,0 blr /* * on an exception, reset to the beginning and jump back into the * standard __copy_tofrom_user */ .Labort: ld r20,-120(1) ld r21,-112(1) ld r22,-104(1) ld r23,-96(1) ld r24,-88(1) ld r25,-80(1) ld r26,-72(1) ld r27,-64(1) ld r28,-56(1) ld r29,-48(1) ld r30,-40(1) ld r31,-32(1) ld r3,-24(r1) ld r4,-16(r1) li r5,4096 b .Ldst_aligned EXPORT_SYMBOL(__copy_tofrom_user)
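The .Lsrc_unaligned path above builds each aligned destination double word from two aligned source loads with complementary shifts (sLd/sHd, whose mapping to srd/sld flips with endianness). A little-endian C sketch, assuming 8-byte-aligned reads are safe; off is the source misalignment in bytes, 1..7:

#include <stdint.h>

static uint64_t combine_le(uint64_t lo, uint64_t hi, unsigned int off)
{
	unsigned int s = 8 * off;	/* sldi r10,r0,3 in the asm */

	return (lo >> s) | (hi << (64 - s));	/* sLd ... | sHd ... */
}

Each loop iteration keeps the previous high word around, so every output double word costs one new load; that is the point of the two-load preamble.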
aixcc-public/challenge-001-exemplar-source
6,420
arch/powerpc/lib/checksum_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains assembly-language implementations * of IP-style 1's complement checksum routines. * * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au). */ #include <linux/sys.h> #include <asm/processor.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/ppc_asm.h> #include <asm/export.h> .text /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) * * __csum_partial(buff, len, sum) */ _GLOBAL(__csum_partial) subi r3,r3,4 srawi. r6,r4,2 /* Divide len by 4 and also clear carry */ beq 3f /* if we're doing < 4 bytes */ andi. r0,r3,2 /* Align buffer to longword boundary */ beq+ 1f lhz r0,4(r3) /* do 2 bytes to get aligned */ subi r4,r4,2 addi r3,r3,2 srwi. r6,r4,2 /* # words to do */ adde r5,r5,r0 beq 3f 1: andi. r6,r6,3 /* Prepare to handle words 4 by 4 */ beq 21f mtctr r6 2: lwzu r0,4(r3) adde r5,r5,r0 bdnz 2b 21: srwi. r6,r4,4 /* # blocks of 4 words to do */ beq 3f lwz r0,4(r3) mtctr r6 lwz r6,8(r3) adde r5,r5,r0 lwz r7,12(r3) adde r5,r5,r6 lwzu r8,16(r3) adde r5,r5,r7 bdz 23f 22: lwz r0,4(r3) adde r5,r5,r8 lwz r6,8(r3) adde r5,r5,r0 lwz r7,12(r3) adde r5,r5,r6 lwzu r8,16(r3) adde r5,r5,r7 bdnz 22b 23: adde r5,r5,r8 3: andi. r0,r4,2 beq+ 4f lhz r0,4(r3) addi r3,r3,2 adde r5,r5,r0 4: andi. r0,r4,1 beq+ 5f lbz r0,4(r3) slwi r0,r0,8 /* Upper byte of word */ adde r5,r5,r0 5: addze r3,r5 /* add in final carry */ blr EXPORT_SYMBOL(__csum_partial) /* * Computes the checksum of a memory block at src, length len, * and adds in 0xffffffff, while copying the block to dst. * If an access exception occurs it returns zero. * * csum_partial_copy_generic(src, dst, len) */ #define CSUM_COPY_16_BYTES_WITHEX(n) \ 8 ## n ## 0: \ lwz r7,4(r4); \ 8 ## n ## 1: \ lwz r8,8(r4); \ 8 ## n ## 2: \ lwz r9,12(r4); \ 8 ## n ## 3: \ lwzu r10,16(r4); \ 8 ## n ## 4: \ stw r7,4(r6); \ adde r12,r12,r7; \ 8 ## n ## 5: \ stw r8,8(r6); \ adde r12,r12,r8; \ 8 ## n ## 6: \ stw r9,12(r6); \ adde r12,r12,r9; \ 8 ## n ## 7: \ stwu r10,16(r6); \ adde r12,r12,r10 #define CSUM_COPY_16_BYTES_EXCODE(n) \ EX_TABLE(8 ## n ## 0b, fault); \ EX_TABLE(8 ## n ## 1b, fault); \ EX_TABLE(8 ## n ## 2b, fault); \ EX_TABLE(8 ## n ## 3b, fault); \ EX_TABLE(8 ## n ## 4b, fault); \ EX_TABLE(8 ## n ## 5b, fault); \ EX_TABLE(8 ## n ## 6b, fault); \ EX_TABLE(8 ## n ## 7b, fault); .text CACHELINE_BYTES = L1_CACHE_BYTES LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) _GLOBAL(csum_partial_copy_generic) li r12,-1 addic r0,r0,0 /* clear carry */ addi r6,r4,-4 neg r0,r4 addi r4,r3,-4 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */ crset 4*cr7+eq beq 58f cmplw 0,r5,r0 /* is this more than total to do? */ blt 63f /* if not much to do */ rlwinm r7,r6,3,0x8 rlwnm r12,r12,r7,0,31 /* odd destination address: rotate one byte */ cmplwi cr7,r7,0 /* is destination address even ? */ andi. r8,r0,3 /* get it word-aligned first */ mtctr r8 beq+ 61f li r3,0 70: lbz r9,4(r4) /* do some bytes */ addi r4,r4,1 slwi r3,r3,8 rlwimi r3,r9,0,24,31 71: stb r9,4(r6) addi r6,r6,1 bdnz 70b adde r12,r12,r3 61: subf r5,r0,r5 srwi. r0,r0,2 mtctr r0 beq 58f 72: lwzu r9,4(r4) /* do some words */ adde r12,r12,r9 73: stwu r9,4(r6) bdnz 72b 58: srwi. 
r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */ clrlwi r5,r5,32-LG_CACHELINE_BYTES li r11,4 beq 63f /* Here we decide how far ahead to prefetch the source */ li r3,4 cmpwi r0,1 li r7,0 ble 114f li r7,1 #if MAX_COPY_PREFETCH > 1 /* Heuristically, for large transfers we prefetch MAX_COPY_PREFETCH cachelines ahead. For small transfers we prefetch 1 cacheline ahead. */ cmpwi r0,MAX_COPY_PREFETCH ble 112f li r7,MAX_COPY_PREFETCH 112: mtctr r7 111: dcbt r3,r4 addi r3,r3,CACHELINE_BYTES bdnz 111b #else dcbt r3,r4 addi r3,r3,CACHELINE_BYTES #endif /* MAX_COPY_PREFETCH > 1 */ 114: subf r8,r7,r0 mr r0,r7 mtctr r8 53: dcbt r3,r4 54: dcbz r11,r6 /* the main body of the cacheline loop */ CSUM_COPY_16_BYTES_WITHEX(0) #if L1_CACHE_BYTES >= 32 CSUM_COPY_16_BYTES_WITHEX(1) #if L1_CACHE_BYTES >= 64 CSUM_COPY_16_BYTES_WITHEX(2) CSUM_COPY_16_BYTES_WITHEX(3) #if L1_CACHE_BYTES >= 128 CSUM_COPY_16_BYTES_WITHEX(4) CSUM_COPY_16_BYTES_WITHEX(5) CSUM_COPY_16_BYTES_WITHEX(6) CSUM_COPY_16_BYTES_WITHEX(7) #endif #endif #endif bdnz 53b cmpwi r0,0 li r3,4 li r7,0 bne 114b 63: srwi. r0,r5,2 mtctr r0 beq 64f 30: lwzu r0,4(r4) adde r12,r12,r0 31: stwu r0,4(r6) bdnz 30b 64: andi. r0,r5,2 beq+ 65f 40: lhz r0,4(r4) addi r4,r4,2 41: sth r0,4(r6) adde r12,r12,r0 addi r6,r6,2 65: andi. r0,r5,1 beq+ 66f 50: lbz r0,4(r4) 51: stb r0,4(r6) slwi r0,r0,8 adde r12,r12,r0 66: addze r3,r12 beqlr+ cr7 rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */ blr fault: li r3,0 blr EX_TABLE(70b, fault); EX_TABLE(71b, fault); EX_TABLE(72b, fault); EX_TABLE(73b, fault); EX_TABLE(54b, fault); /* * this stuff handles faults in the cacheline loop and branches to either * fault (if in read part) or fault (if in write part) */ CSUM_COPY_16_BYTES_EXCODE(0) #if L1_CACHE_BYTES >= 32 CSUM_COPY_16_BYTES_EXCODE(1) #if L1_CACHE_BYTES >= 64 CSUM_COPY_16_BYTES_EXCODE(2) CSUM_COPY_16_BYTES_EXCODE(3) #if L1_CACHE_BYTES >= 128 CSUM_COPY_16_BYTES_EXCODE(4) CSUM_COPY_16_BYTES_EXCODE(5) CSUM_COPY_16_BYTES_EXCODE(6) CSUM_COPY_16_BYTES_EXCODE(7) #endif #endif #endif EX_TABLE(30b, fault); EX_TABLE(31b, fault); EX_TABLE(40b, fault); EX_TABLE(41b, fault); EX_TABLE(50b, fault); EX_TABLE(51b, fault); EXPORT_SYMBOL(csum_partial_copy_generic) /* * __sum16 csum_ipv6_magic(const struct in6_addr *saddr, * const struct in6_addr *daddr, * __u32 len, __u8 proto, __wsum sum) */ _GLOBAL(csum_ipv6_magic) lwz r8, 0(r3) lwz r9, 4(r3) addc r0, r7, r8 lwz r10, 8(r3) adde r0, r0, r9 lwz r11, 12(r3) adde r0, r0, r10 lwz r8, 0(r4) adde r0, r0, r11 lwz r9, 4(r4) adde r0, r0, r8 lwz r10, 8(r4) adde r0, r0, r9 lwz r11, 12(r4) adde r0, r0, r10 add r5, r5, r6 /* assumption: len + proto doesn't carry */ adde r0, r0, r11 adde r0, r0, r5 addze r0, r0 rotlwi r3, r0, 16 add r3, r0, r3 not r3, r3 rlwinm r3, r3, 16, 16, 31 blr EXPORT_SYMBOL(csum_ipv6_magic)
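The last four instructions of csum_ipv6_magic fold the 32-bit one's-complement sum down to 16 bits. In C, as a sketch that assumes the carries were already folded back in by the adde/addze chain:

#include <stdint.h>

static uint16_t csum_fold_sketch(uint32_t sum)
{
	uint32_t r = (sum << 16) | (sum >> 16);	/* rotlwi r3,r0,16 */

	r += sum;			/* add r3,r0,r3: high half now holds
					 * low16 + high16 plus the carry */
	return (uint16_t)~(r >> 16);	/* not; rlwinm r3,r3,16,16,31 */
}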
aixcc-public/challenge-001-exemplar-source
11,692
arch/powerpc/lib/memcmp_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Author: Anton Blanchard <anton@au.ibm.com> * Copyright 2015 IBM Corporation. */ #include <asm/ppc_asm.h> #include <asm/export.h> #include <asm/ppc-opcode.h> #define off8 r6 #define off16 r7 #define off24 r8 #define rA r9 #define rB r10 #define rC r11 #define rD r27 #define rE r28 #define rF r29 #define rG r30 #define rH r31 #ifdef __LITTLE_ENDIAN__ #define LH lhbrx #define LW lwbrx #define LD ldbrx #define LVS lvsr #define VPERM(_VRT,_VRA,_VRB,_VRC) \ vperm _VRT,_VRB,_VRA,_VRC #else #define LH lhzx #define LW lwzx #define LD ldx #define LVS lvsl #define VPERM(_VRT,_VRA,_VRB,_VRC) \ vperm _VRT,_VRA,_VRB,_VRC #endif #define VMX_THRESH 4096 #define ENTER_VMX_OPS \ mflr r0; \ std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \ std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \ std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \ std r0,16(r1); \ stdu r1,-STACKFRAMESIZE(r1); \ bl enter_vmx_ops; \ cmpwi cr1,r3,0; \ ld r0,STACKFRAMESIZE+16(r1); \ ld r3,STK_REG(R31)(r1); \ ld r4,STK_REG(R30)(r1); \ ld r5,STK_REG(R29)(r1); \ addi r1,r1,STACKFRAMESIZE; \ mtlr r0 #define EXIT_VMX_OPS \ mflr r0; \ std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \ std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \ std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \ std r0,16(r1); \ stdu r1,-STACKFRAMESIZE(r1); \ bl exit_vmx_ops; \ ld r0,STACKFRAMESIZE+16(r1); \ ld r3,STK_REG(R31)(r1); \ ld r4,STK_REG(R30)(r1); \ ld r5,STK_REG(R29)(r1); \ addi r1,r1,STACKFRAMESIZE; \ mtlr r0 /* * LD_VSR_CROSS16B load the 2nd 16 bytes for _vaddr which is unaligned with * 16 bytes boundary and permute the result with the 1st 16 bytes. * | y y y y y y y y y y y y y 0 1 2 | 3 4 5 6 7 8 9 a b c d e f z z z | * ^ ^ ^ * 0xbbbb10 0xbbbb20 0xbbb30 * ^ * _vaddr * * * _vmask is the mask generated by LVS * _v1st_qw is the 1st aligned QW of current addr which is already loaded. * for example: 0xyyyyyyyyyyyyy012 for big endian * _v2nd_qw is the 2nd aligned QW of cur _vaddr to be loaded. * for example: 0x3456789abcdefzzz for big endian * The permute result is saved in _v_res. * for example: 0x0123456789abcdef for big endian. */ #define LD_VSR_CROSS16B(_vaddr,_vmask,_v1st_qw,_v2nd_qw,_v_res) \ lvx _v2nd_qw,_vaddr,off16; \ VPERM(_v_res,_v1st_qw,_v2nd_qw,_vmask) /* * There are 2 categories for memcmp: * 1) src/dst has the same offset to the 8 bytes boundary. The handlers * are named like .Lsameoffset_xxxx * 2) src/dst has different offset to the 8 bytes boundary. The handlers * are named like .Ldiffoffset_xxxx */ _GLOBAL_TOC(memcmp) cmpdi cr1,r5,0 /* Use the short loop if the src/dst addresses are not * with the same offset of 8 bytes align boundary. */ xor r6,r3,r4 andi. r6,r6,7 /* Fall back to short loop if compare at aligned addrs * with less than 8 bytes. */ cmpdi cr6,r5,7 beq cr1,.Lzero bgt cr6,.Lno_short .Lshort: mtctr r5 1: lbz rA,0(r3) lbz rB,0(r4) subf. rC,rB,rA bne .Lnon_zero bdz .Lzero lbz rA,1(r3) lbz rB,1(r4) subf. rC,rB,rA bne .Lnon_zero bdz .Lzero lbz rA,2(r3) lbz rB,2(r4) subf. rC,rB,rA bne .Lnon_zero bdz .Lzero lbz rA,3(r3) lbz rB,3(r4) subf. rC,rB,rA bne .Lnon_zero addi r3,r3,4 addi r4,r4,4 bdnz 1b .Lzero: li r3,0 blr .Lno_short: dcbt 0,r3 dcbt 0,r4 bne .Ldiffoffset_8bytes_make_align_start .Lsameoffset_8bytes_make_align_start: /* attempt to compare bytes not aligned with 8 bytes so that * rest comparison can run based on 8 bytes alignment. */ andi. r6,r3,7 /* Try to compare the first double word which is not 8 bytes aligned: * load the first double word at (src & ~7UL) and shift left appropriate * bits before comparision. 
*/ rlwinm r6,r3,3,26,28 beq .Lsameoffset_8bytes_aligned clrrdi r3,r3,3 clrrdi r4,r4,3 LD rA,0,r3 LD rB,0,r4 sld rA,rA,r6 sld rB,rB,r6 cmpld cr0,rA,rB srwi r6,r6,3 bne cr0,.LcmpAB_lightweight subfic r6,r6,8 subf. r5,r6,r5 addi r3,r3,8 addi r4,r4,8 beq .Lzero .Lsameoffset_8bytes_aligned: /* now we are aligned with 8 bytes. * Use .Llong loop if left cmp bytes are equal or greater than 32B. */ cmpdi cr6,r5,31 bgt cr6,.Llong .Lcmp_lt32bytes: /* compare 1 ~ 31 bytes, at least r3 addr is 8 bytes aligned now */ cmpdi cr5,r5,7 srdi r0,r5,3 ble cr5,.Lcmp_rest_lt8bytes /* handle 8 ~ 31 bytes */ clrldi r5,r5,61 mtctr r0 2: LD rA,0,r3 LD rB,0,r4 cmpld cr0,rA,rB addi r3,r3,8 addi r4,r4,8 bne cr0,.LcmpAB_lightweight bdnz 2b cmpwi r5,0 beq .Lzero .Lcmp_rest_lt8bytes: /* * Here we have less than 8 bytes to compare. At least s1 is aligned to * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a * page boundary, otherwise we might read past the end of the buffer and * trigger a page fault. We use 4K as the conservative minimum page * size. If we detect that case we go to the byte-by-byte loop. * * Otherwise the next double word is loaded from s1 and s2, and shifted * right to compare the appropriate bits. */ clrldi r6,r4,(64-12) // r6 = r4 & 0xfff cmpdi r6,0xff8 bgt .Lshort subfic r6,r5,8 slwi r6,r6,3 LD rA,0,r3 LD rB,0,r4 srd rA,rA,r6 srd rB,rB,r6 cmpld cr0,rA,rB bne cr0,.LcmpAB_lightweight b .Lzero .Lnon_zero: mr r3,rC blr .Llong: #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION /* Try to use vmx loop if length is equal or greater than 4K */ cmpldi cr6,r5,VMX_THRESH bge cr6,.Lsameoffset_vmx_cmp END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) .Llong_novmx_cmp: #endif /* At least s1 addr is aligned with 8 bytes */ li off8,8 li off16,16 li off24,24 std r31,-8(r1) std r30,-16(r1) std r29,-24(r1) std r28,-32(r1) std r27,-40(r1) srdi r0,r5,5 mtctr r0 andi. r5,r5,31 LD rA,0,r3 LD rB,0,r4 LD rC,off8,r3 LD rD,off8,r4 LD rE,off16,r3 LD rF,off16,r4 LD rG,off24,r3 LD rH,off24,r4 cmpld cr0,rA,rB addi r3,r3,32 addi r4,r4,32 bdz .Lfirst32 LD rA,0,r3 LD rB,0,r4 cmpld cr1,rC,rD LD rC,off8,r3 LD rD,off8,r4 cmpld cr6,rE,rF LD rE,off16,r3 LD rF,off16,r4 cmpld cr7,rG,rH bne cr0,.LcmpAB LD rG,off24,r3 LD rH,off24,r4 cmpld cr0,rA,rB bne cr1,.LcmpCD addi r3,r3,32 addi r4,r4,32 bdz .Lsecond32 .balign 16 1: LD rA,0,r3 LD rB,0,r4 cmpld cr1,rC,rD bne cr6,.LcmpEF LD rC,off8,r3 LD rD,off8,r4 cmpld cr6,rE,rF bne cr7,.LcmpGH LD rE,off16,r3 LD rF,off16,r4 cmpld cr7,rG,rH bne cr0,.LcmpAB LD rG,off24,r3 LD rH,off24,r4 cmpld cr0,rA,rB bne cr1,.LcmpCD addi r3,r3,32 addi r4,r4,32 bdnz 1b .Lsecond32: cmpld cr1,rC,rD bne cr6,.LcmpEF cmpld cr6,rE,rF bne cr7,.LcmpGH cmpld cr7,rG,rH bne cr0,.LcmpAB bne cr1,.LcmpCD bne cr6,.LcmpEF bne cr7,.LcmpGH .Ltail: ld r31,-8(r1) ld r30,-16(r1) ld r29,-24(r1) ld r28,-32(r1) ld r27,-40(r1) cmpdi r5,0 beq .Lzero b .Lshort .Lfirst32: cmpld cr1,rC,rD cmpld cr6,rE,rF cmpld cr7,rG,rH bne cr0,.LcmpAB bne cr1,.LcmpCD bne cr6,.LcmpEF bne cr7,.LcmpGH b .Ltail .LcmpAB: li r3,1 bgt cr0,.Lout li r3,-1 b .Lout .LcmpCD: li r3,1 bgt cr1,.Lout li r3,-1 b .Lout .LcmpEF: li r3,1 bgt cr6,.Lout li r3,-1 b .Lout .LcmpGH: li r3,1 bgt cr7,.Lout li r3,-1 .Lout: ld r31,-8(r1) ld r30,-16(r1) ld r29,-24(r1) ld r28,-32(r1) ld r27,-40(r1) blr .LcmpAB_lightweight: /* skip NV GPRS restore */ li r3,1 bgtlr li r3,-1 blr #ifdef CONFIG_ALTIVEC .Lsameoffset_vmx_cmp: /* Enter with src/dst addrs has the same offset with 8 bytes * align boundary. 
* * There is an optimization based on following fact: memcmp() * prones to fail early at the first 32 bytes. * Before applying VMX instructions which will lead to 32x128bits * VMX regs load/restore penalty, we compare the first 32 bytes * so that we can catch the ~80% fail cases. */ li r0,4 mtctr r0 .Lsameoffset_prechk_32B_loop: LD rA,0,r3 LD rB,0,r4 cmpld cr0,rA,rB addi r3,r3,8 addi r4,r4,8 bne cr0,.LcmpAB_lightweight addi r5,r5,-8 bdnz .Lsameoffset_prechk_32B_loop ENTER_VMX_OPS beq cr1,.Llong_novmx_cmp 3: /* need to check whether r4 has the same offset with r3 * for 16 bytes boundary. */ xor r0,r3,r4 andi. r0,r0,0xf bne .Ldiffoffset_vmx_cmp_start /* len is no less than 4KB. Need to align with 16 bytes further. */ andi. rA,r3,8 LD rA,0,r3 beq 4f LD rB,0,r4 cmpld cr0,rA,rB addi r3,r3,8 addi r4,r4,8 addi r5,r5,-8 beq cr0,4f /* save and restore cr0 */ mfocrf r5,128 EXIT_VMX_OPS mtocrf 128,r5 b .LcmpAB_lightweight 4: /* compare 32 bytes for each loop */ srdi r0,r5,5 mtctr r0 clrldi r5,r5,59 li off16,16 .balign 16 5: lvx v0,0,r3 lvx v1,0,r4 VCMPEQUD_RC(v0,v0,v1) bnl cr6,7f lvx v0,off16,r3 lvx v1,off16,r4 VCMPEQUD_RC(v0,v0,v1) bnl cr6,6f addi r3,r3,32 addi r4,r4,32 bdnz 5b EXIT_VMX_OPS cmpdi r5,0 beq .Lzero b .Lcmp_lt32bytes 6: addi r3,r3,16 addi r4,r4,16 7: /* diff the last 16 bytes */ EXIT_VMX_OPS LD rA,0,r3 LD rB,0,r4 cmpld cr0,rA,rB li off8,8 bne cr0,.LcmpAB_lightweight LD rA,off8,r3 LD rB,off8,r4 cmpld cr0,rA,rB bne cr0,.LcmpAB_lightweight b .Lzero #endif .Ldiffoffset_8bytes_make_align_start: /* now try to align s1 with 8 bytes */ rlwinm r6,r3,3,26,28 beq .Ldiffoffset_align_s1_8bytes clrrdi r3,r3,3 LD rA,0,r3 LD rB,0,r4 /* unaligned load */ sld rA,rA,r6 srd rA,rA,r6 srd rB,rB,r6 cmpld cr0,rA,rB srwi r6,r6,3 bne cr0,.LcmpAB_lightweight subfic r6,r6,8 subf. r5,r6,r5 addi r3,r3,8 add r4,r4,r6 beq .Lzero .Ldiffoffset_align_s1_8bytes: /* now s1 is aligned with 8 bytes. */ #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION /* only do vmx ops when the size equal or greater than 4K bytes */ cmpdi cr5,r5,VMX_THRESH bge cr5,.Ldiffoffset_vmx_cmp END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) .Ldiffoffset_novmx_cmp: #endif cmpdi cr5,r5,31 ble cr5,.Lcmp_lt32bytes #ifdef CONFIG_ALTIVEC b .Llong_novmx_cmp #else b .Llong #endif #ifdef CONFIG_ALTIVEC .Ldiffoffset_vmx_cmp: /* perform a 32 bytes pre-checking before * enable VMX operations. */ li r0,4 mtctr r0 .Ldiffoffset_prechk_32B_loop: LD rA,0,r3 LD rB,0,r4 cmpld cr0,rA,rB addi r3,r3,8 addi r4,r4,8 bne cr0,.LcmpAB_lightweight addi r5,r5,-8 bdnz .Ldiffoffset_prechk_32B_loop ENTER_VMX_OPS beq cr1,.Ldiffoffset_novmx_cmp .Ldiffoffset_vmx_cmp_start: /* Firstly try to align r3 with 16 bytes */ andi. 
r6,r3,0xf li off16,16 beq .Ldiffoffset_vmx_s1_16bytes_align LVS v3,0,r3 LVS v4,0,r4 lvx v5,0,r3 lvx v6,0,r4 LD_VSR_CROSS16B(r3,v3,v5,v7,v9) LD_VSR_CROSS16B(r4,v4,v6,v8,v10) VCMPEQUB_RC(v7,v9,v10) bnl cr6,.Ldiffoffset_vmx_diff_found subfic r6,r6,16 subf r5,r6,r5 add r3,r3,r6 add r4,r4,r6 .Ldiffoffset_vmx_s1_16bytes_align: /* now s1 is aligned with 16 bytes */ lvx v6,0,r4 LVS v4,0,r4 srdi r6,r5,5 /* loop for 32 bytes each */ clrldi r5,r5,59 mtctr r6 .balign 16 .Ldiffoffset_vmx_32bytesloop: /* the first qw of r4 was saved in v6 */ lvx v9,0,r3 LD_VSR_CROSS16B(r4,v4,v6,v8,v10) VCMPEQUB_RC(v7,v9,v10) vor v6,v8,v8 bnl cr6,.Ldiffoffset_vmx_diff_found addi r3,r3,16 addi r4,r4,16 lvx v9,0,r3 LD_VSR_CROSS16B(r4,v4,v6,v8,v10) VCMPEQUB_RC(v7,v9,v10) vor v6,v8,v8 bnl cr6,.Ldiffoffset_vmx_diff_found addi r3,r3,16 addi r4,r4,16 bdnz .Ldiffoffset_vmx_32bytesloop EXIT_VMX_OPS cmpdi r5,0 beq .Lzero b .Lcmp_lt32bytes .Ldiffoffset_vmx_diff_found: EXIT_VMX_OPS /* anyway, the diff will appear in next 16 bytes */ li r5,16 b .Lcmp_lt32bytes #endif EXPORT_SYMBOL(memcmp)
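The .Lcmp_rest_lt8bytes comment above explains the one subtle hazard in memcmp's tail: an 8-byte load from the unaligned buffer must not cross into a possibly unmapped page. A C sketch of the guard, assuming the same conservative 4 KB minimum page size as the asm:

#include <stdint.h>

/* clrldi r6,r4,(64-12); cmpdi r6,0xff8; bgt .Lshort  in the asm:
 * an 8-byte load at offset o within a 4K page is safe iff o <= 0xff8. */
static int tail_load8_safe(const void *p)
{
	return ((uintptr_t)p & 0xfff) <= 0xff8;
}

If the test fails, memcmp falls back to the byte loop, which can never read past the end of the buffer.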
aixcc-public/challenge-001-exemplar-source
10,966
arch/powerpc/lib/crtsavres.S
/* * Special support for eabi and SVR4 * * Copyright (C) 1995, 1996, 1998, 2000, 2001 Free Software Foundation, Inc. * Copyright 2008 Freescale Semiconductor, Inc. * Written By Michael Meissner * * Based on gcc/config/rs6000/crtsavres.asm from gcc * 64 bit additions from reading the PPC elf64abi document. * * This file is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * In addition to the permissions in the GNU General Public License, the * Free Software Foundation gives you unlimited permission to link the * compiled version of this file with other programs, and to distribute * those programs without any restriction coming from the use of this * file. (The General Public License restrictions do apply in other * respects; for example, they cover modification of the file, and * distribution when not linked into another program.) * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. * * As a special exception, if you link this library with files * compiled with GCC to produce an executable, this does not cause * the resulting executable to be covered by the GNU General Public License. * This exception does not however invalidate any other reasons why * the executable file might be covered by the GNU General Public License. */ #include <asm/ppc_asm.h> .file "crtsavres.S" #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE .section ".text" #ifndef CONFIG_PPC64 /* Routines for saving integer registers, called by the compiler. */ /* Called with r11 pointing to the stack header word of the caller of the */ /* function, just beyond the end of the integer save area. */ _GLOBAL(_savegpr_14) _GLOBAL(_save32gpr_14) stw 14,-72(11) /* save gp registers */ _GLOBAL(_savegpr_15) _GLOBAL(_save32gpr_15) stw 15,-68(11) _GLOBAL(_savegpr_16) _GLOBAL(_save32gpr_16) stw 16,-64(11) _GLOBAL(_savegpr_17) _GLOBAL(_save32gpr_17) stw 17,-60(11) _GLOBAL(_savegpr_18) _GLOBAL(_save32gpr_18) stw 18,-56(11) _GLOBAL(_savegpr_19) _GLOBAL(_save32gpr_19) stw 19,-52(11) _GLOBAL(_savegpr_20) _GLOBAL(_save32gpr_20) stw 20,-48(11) _GLOBAL(_savegpr_21) _GLOBAL(_save32gpr_21) stw 21,-44(11) _GLOBAL(_savegpr_22) _GLOBAL(_save32gpr_22) stw 22,-40(11) _GLOBAL(_savegpr_23) _GLOBAL(_save32gpr_23) stw 23,-36(11) _GLOBAL(_savegpr_24) _GLOBAL(_save32gpr_24) stw 24,-32(11) _GLOBAL(_savegpr_25) _GLOBAL(_save32gpr_25) stw 25,-28(11) _GLOBAL(_savegpr_26) _GLOBAL(_save32gpr_26) stw 26,-24(11) _GLOBAL(_savegpr_27) _GLOBAL(_save32gpr_27) stw 27,-20(11) _GLOBAL(_savegpr_28) _GLOBAL(_save32gpr_28) stw 28,-16(11) _GLOBAL(_savegpr_29) _GLOBAL(_save32gpr_29) stw 29,-12(11) _GLOBAL(_savegpr_30) _GLOBAL(_save32gpr_30) stw 30,-8(11) _GLOBAL(_savegpr_31) _GLOBAL(_save32gpr_31) stw 31,-4(11) blr /* Routines for restoring integer registers, called by the compiler. */ /* Called with r11 pointing to the stack header word of the caller of the */ /* function, just beyond the end of the integer restore area. 
*/ _GLOBAL(_restgpr_14) _GLOBAL(_rest32gpr_14) lwz 14,-72(11) /* restore gp registers */ _GLOBAL(_restgpr_15) _GLOBAL(_rest32gpr_15) lwz 15,-68(11) _GLOBAL(_restgpr_16) _GLOBAL(_rest32gpr_16) lwz 16,-64(11) _GLOBAL(_restgpr_17) _GLOBAL(_rest32gpr_17) lwz 17,-60(11) _GLOBAL(_restgpr_18) _GLOBAL(_rest32gpr_18) lwz 18,-56(11) _GLOBAL(_restgpr_19) _GLOBAL(_rest32gpr_19) lwz 19,-52(11) _GLOBAL(_restgpr_20) _GLOBAL(_rest32gpr_20) lwz 20,-48(11) _GLOBAL(_restgpr_21) _GLOBAL(_rest32gpr_21) lwz 21,-44(11) _GLOBAL(_restgpr_22) _GLOBAL(_rest32gpr_22) lwz 22,-40(11) _GLOBAL(_restgpr_23) _GLOBAL(_rest32gpr_23) lwz 23,-36(11) _GLOBAL(_restgpr_24) _GLOBAL(_rest32gpr_24) lwz 24,-32(11) _GLOBAL(_restgpr_25) _GLOBAL(_rest32gpr_25) lwz 25,-28(11) _GLOBAL(_restgpr_26) _GLOBAL(_rest32gpr_26) lwz 26,-24(11) _GLOBAL(_restgpr_27) _GLOBAL(_rest32gpr_27) lwz 27,-20(11) _GLOBAL(_restgpr_28) _GLOBAL(_rest32gpr_28) lwz 28,-16(11) _GLOBAL(_restgpr_29) _GLOBAL(_rest32gpr_29) lwz 29,-12(11) _GLOBAL(_restgpr_30) _GLOBAL(_rest32gpr_30) lwz 30,-8(11) _GLOBAL(_restgpr_31) _GLOBAL(_rest32gpr_31) lwz 31,-4(11) blr /* Routines for restoring integer registers, called by the compiler. */ /* Called with r11 pointing to the stack header word of the caller of the */ /* function, just beyond the end of the integer restore area. */ _GLOBAL(_restgpr_14_x) _GLOBAL(_rest32gpr_14_x) lwz 14,-72(11) /* restore gp registers */ _GLOBAL(_restgpr_15_x) _GLOBAL(_rest32gpr_15_x) lwz 15,-68(11) _GLOBAL(_restgpr_16_x) _GLOBAL(_rest32gpr_16_x) lwz 16,-64(11) _GLOBAL(_restgpr_17_x) _GLOBAL(_rest32gpr_17_x) lwz 17,-60(11) _GLOBAL(_restgpr_18_x) _GLOBAL(_rest32gpr_18_x) lwz 18,-56(11) _GLOBAL(_restgpr_19_x) _GLOBAL(_rest32gpr_19_x) lwz 19,-52(11) _GLOBAL(_restgpr_20_x) _GLOBAL(_rest32gpr_20_x) lwz 20,-48(11) _GLOBAL(_restgpr_21_x) _GLOBAL(_rest32gpr_21_x) lwz 21,-44(11) _GLOBAL(_restgpr_22_x) _GLOBAL(_rest32gpr_22_x) lwz 22,-40(11) _GLOBAL(_restgpr_23_x) _GLOBAL(_rest32gpr_23_x) lwz 23,-36(11) _GLOBAL(_restgpr_24_x) _GLOBAL(_rest32gpr_24_x) lwz 24,-32(11) _GLOBAL(_restgpr_25_x) _GLOBAL(_rest32gpr_25_x) lwz 25,-28(11) _GLOBAL(_restgpr_26_x) _GLOBAL(_rest32gpr_26_x) lwz 26,-24(11) _GLOBAL(_restgpr_27_x) _GLOBAL(_rest32gpr_27_x) lwz 27,-20(11) _GLOBAL(_restgpr_28_x) _GLOBAL(_rest32gpr_28_x) lwz 28,-16(11) _GLOBAL(_restgpr_29_x) _GLOBAL(_rest32gpr_29_x) lwz 29,-12(11) _GLOBAL(_restgpr_30_x) _GLOBAL(_rest32gpr_30_x) lwz 30,-8(11) _GLOBAL(_restgpr_31_x) _GLOBAL(_rest32gpr_31_x) lwz 0,4(11) lwz 31,-4(11) mtlr 0 mr 1,11 blr #ifdef CONFIG_ALTIVEC /* Called with r0 pointing just beyond the end of the vector save area. 
*/ _GLOBAL(_savevr_20) li r11,-192 stvx v20,r11,r0 _GLOBAL(_savevr_21) li r11,-176 stvx v21,r11,r0 _GLOBAL(_savevr_22) li r11,-160 stvx v22,r11,r0 _GLOBAL(_savevr_23) li r11,-144 stvx v23,r11,r0 _GLOBAL(_savevr_24) li r11,-128 stvx v24,r11,r0 _GLOBAL(_savevr_25) li r11,-112 stvx v25,r11,r0 _GLOBAL(_savevr_26) li r11,-96 stvx v26,r11,r0 _GLOBAL(_savevr_27) li r11,-80 stvx v27,r11,r0 _GLOBAL(_savevr_28) li r11,-64 stvx v28,r11,r0 _GLOBAL(_savevr_29) li r11,-48 stvx v29,r11,r0 _GLOBAL(_savevr_30) li r11,-32 stvx v30,r11,r0 _GLOBAL(_savevr_31) li r11,-16 stvx v31,r11,r0 blr _GLOBAL(_restvr_20) li r11,-192 lvx v20,r11,r0 _GLOBAL(_restvr_21) li r11,-176 lvx v21,r11,r0 _GLOBAL(_restvr_22) li r11,-160 lvx v22,r11,r0 _GLOBAL(_restvr_23) li r11,-144 lvx v23,r11,r0 _GLOBAL(_restvr_24) li r11,-128 lvx v24,r11,r0 _GLOBAL(_restvr_25) li r11,-112 lvx v25,r11,r0 _GLOBAL(_restvr_26) li r11,-96 lvx v26,r11,r0 _GLOBAL(_restvr_27) li r11,-80 lvx v27,r11,r0 _GLOBAL(_restvr_28) li r11,-64 lvx v28,r11,r0 _GLOBAL(_restvr_29) li r11,-48 lvx v29,r11,r0 _GLOBAL(_restvr_30) li r11,-32 lvx v30,r11,r0 _GLOBAL(_restvr_31) li r11,-16 lvx v31,r11,r0 blr #endif /* CONFIG_ALTIVEC */ #else /* CONFIG_PPC64 */ .globl _savegpr0_14 _savegpr0_14: std r14,-144(r1) .globl _savegpr0_15 _savegpr0_15: std r15,-136(r1) .globl _savegpr0_16 _savegpr0_16: std r16,-128(r1) .globl _savegpr0_17 _savegpr0_17: std r17,-120(r1) .globl _savegpr0_18 _savegpr0_18: std r18,-112(r1) .globl _savegpr0_19 _savegpr0_19: std r19,-104(r1) .globl _savegpr0_20 _savegpr0_20: std r20,-96(r1) .globl _savegpr0_21 _savegpr0_21: std r21,-88(r1) .globl _savegpr0_22 _savegpr0_22: std r22,-80(r1) .globl _savegpr0_23 _savegpr0_23: std r23,-72(r1) .globl _savegpr0_24 _savegpr0_24: std r24,-64(r1) .globl _savegpr0_25 _savegpr0_25: std r25,-56(r1) .globl _savegpr0_26 _savegpr0_26: std r26,-48(r1) .globl _savegpr0_27 _savegpr0_27: std r27,-40(r1) .globl _savegpr0_28 _savegpr0_28: std r28,-32(r1) .globl _savegpr0_29 _savegpr0_29: std r29,-24(r1) .globl _savegpr0_30 _savegpr0_30: std r30,-16(r1) .globl _savegpr0_31 _savegpr0_31: std r31,-8(r1) std r0,16(r1) blr .globl _restgpr0_14 _restgpr0_14: ld r14,-144(r1) .globl _restgpr0_15 _restgpr0_15: ld r15,-136(r1) .globl _restgpr0_16 _restgpr0_16: ld r16,-128(r1) .globl _restgpr0_17 _restgpr0_17: ld r17,-120(r1) .globl _restgpr0_18 _restgpr0_18: ld r18,-112(r1) .globl _restgpr0_19 _restgpr0_19: ld r19,-104(r1) .globl _restgpr0_20 _restgpr0_20: ld r20,-96(r1) .globl _restgpr0_21 _restgpr0_21: ld r21,-88(r1) .globl _restgpr0_22 _restgpr0_22: ld r22,-80(r1) .globl _restgpr0_23 _restgpr0_23: ld r23,-72(r1) .globl _restgpr0_24 _restgpr0_24: ld r24,-64(r1) .globl _restgpr0_25 _restgpr0_25: ld r25,-56(r1) .globl _restgpr0_26 _restgpr0_26: ld r26,-48(r1) .globl _restgpr0_27 _restgpr0_27: ld r27,-40(r1) .globl _restgpr0_28 _restgpr0_28: ld r28,-32(r1) .globl _restgpr0_29 _restgpr0_29: ld r0,16(r1) ld r29,-24(r1) mtlr r0 ld r30,-16(r1) ld r31,-8(r1) blr .globl _restgpr0_30 _restgpr0_30: ld r30,-16(r1) .globl _restgpr0_31 _restgpr0_31: ld r0,16(r1) ld r31,-8(r1) mtlr r0 blr #ifdef CONFIG_ALTIVEC /* Called with r0 pointing just beyond the end of the vector save area. 
.globl	_savevr_20
_savevr_20:
	li	r12,-192
	stvx	v20,r12,r0
.globl	_savevr_21
_savevr_21:
	li	r12,-176
	stvx	v21,r12,r0
.globl	_savevr_22
_savevr_22:
	li	r12,-160
	stvx	v22,r12,r0
.globl	_savevr_23
_savevr_23:
	li	r12,-144
	stvx	v23,r12,r0
.globl	_savevr_24
_savevr_24:
	li	r12,-128
	stvx	v24,r12,r0
.globl	_savevr_25
_savevr_25:
	li	r12,-112
	stvx	v25,r12,r0
.globl	_savevr_26
_savevr_26:
	li	r12,-96
	stvx	v26,r12,r0
.globl	_savevr_27
_savevr_27:
	li	r12,-80
	stvx	v27,r12,r0
.globl	_savevr_28
_savevr_28:
	li	r12,-64
	stvx	v28,r12,r0
.globl	_savevr_29
_savevr_29:
	li	r12,-48
	stvx	v29,r12,r0
.globl	_savevr_30
_savevr_30:
	li	r12,-32
	stvx	v30,r12,r0
.globl	_savevr_31
_savevr_31:
	li	r12,-16
	stvx	v31,r12,r0
	blr

.globl	_restvr_20
_restvr_20:
	li	r12,-192
	lvx	v20,r12,r0
.globl	_restvr_21
_restvr_21:
	li	r12,-176
	lvx	v21,r12,r0
.globl	_restvr_22
_restvr_22:
	li	r12,-160
	lvx	v22,r12,r0
.globl	_restvr_23
_restvr_23:
	li	r12,-144
	lvx	v23,r12,r0
.globl	_restvr_24
_restvr_24:
	li	r12,-128
	lvx	v24,r12,r0
.globl	_restvr_25
_restvr_25:
	li	r12,-112
	lvx	v25,r12,r0
.globl	_restvr_26
_restvr_26:
	li	r12,-96
	lvx	v26,r12,r0
.globl	_restvr_27
_restvr_27:
	li	r12,-80
	lvx	v27,r12,r0
.globl	_restvr_28
_restvr_28:
	li	r12,-64
	lvx	v28,r12,r0
.globl	_restvr_29
_restvr_29:
	li	r12,-48
	lvx	v29,r12,r0
.globl	_restvr_30
_restvr_30:
	li	r12,-32
	lvx	v30,r12,r0
.globl	_restvr_31
_restvr_31:
	li	r12,-16
	lvx	v31,r12,r0
	blr

#endif /* CONFIG_ALTIVEC */

#endif /* CONFIG_PPC64 */

#endif
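/*
 * Note (added commentary): these out-of-line save/restore helpers are
 * emitted by the compiler (typically when optimising for size) in
 * place of inline prologue/epilogue register spills.
 */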
aixcc-public/challenge-001-exemplar-source
20,729
arch/powerpc/lib/feature-fixups-test.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <asm/feature-fixups.h>
#include <asm/ppc_asm.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <asm/ppc-opcode.h>

	.text

#define globl(x)	\
	.globl x;	\
x:

globl(ftr_fixup_test1)
	or	1,1,1
	or	2,2,2	/* fixup will nop out this instruction */
	or	3,3,3
globl(end_ftr_fixup_test1)

globl(ftr_fixup_test1_orig)
	or	1,1,1
	or	2,2,2
	or	3,3,3

globl(ftr_fixup_test1_expected)
	or	1,1,1
	nop
	or	3,3,3

globl(ftr_fixup_test2)
	or	1,1,1
	or	2,2,2	/* fixup will replace this with ftr_fixup_test2_alt */
	or	3,3,3
globl(end_ftr_fixup_test2)

globl(ftr_fixup_test2_orig)
	or	1,1,1
	or	2,2,2
	or	3,3,3

globl(ftr_fixup_test2_alt)
	or	31,31,31

globl(ftr_fixup_test2_expected)
	or	1,1,1
	or	31,31,31
	or	3,3,3

globl(ftr_fixup_test3)
	or	1,1,1
	or	2,2,2	/* fixup will fail to replace this */
	or	3,3,3
globl(end_ftr_fixup_test3)

globl(ftr_fixup_test3_orig)
	or	1,1,1
	or	2,2,2
	or	3,3,3

globl(ftr_fixup_test3_alt)
	or	31,31,31
	or	31,31,31

globl(ftr_fixup_test4)
	or	1,1,1
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	3,3,3
globl(end_ftr_fixup_test4)

globl(ftr_fixup_test4_expected)
	or	1,1,1
	or	31,31,31
	or	31,31,31
	nop
	nop
	or	3,3,3

globl(ftr_fixup_test4_orig)
	or	1,1,1
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	3,3,3

globl(ftr_fixup_test4_alt)
	or	31,31,31
	or	31,31,31

globl(ftr_fixup_test5)
	or	1,1,1
BEGIN_FTR_SECTION
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	2,2,2
FTR_SECTION_ELSE
2:	b	3f
3:	or	5,5,5
	beq	3b
	b	1f
	or	6,6,6
	b	2b
1:	bdnz	3b
ALT_FTR_SECTION_END(0, 1)
	or	1,1,1
globl(end_ftr_fixup_test5)

globl(ftr_fixup_test5_expected)
	or	1,1,1
2:	b	3f
3:	or	5,5,5
	beq	3b
	b	1f
	or	6,6,6
	b	2b
1:	bdnz	3b
	or	1,1,1

globl(ftr_fixup_test6)
1:	or	1,1,1
BEGIN_FTR_SECTION
	or	5,5,5
2:	PPC_LCMPI	r3,0
	beq	4f
	blt	2b
	b	1b
	b	4f
FTR_SECTION_ELSE
2:	or	2,2,2
	PPC_LCMPI	r3,1
	beq	3f
	blt	2b
	b	3f
	b	1b
ALT_FTR_SECTION_END(0, 1)
3:	or	1,1,1
	or	2,2,2
4:	or	3,3,3
globl(end_ftr_fixup_test6)

globl(ftr_fixup_test6_expected)
1:	or	1,1,1
2:	or	2,2,2
	PPC_LCMPI	r3,1
	beq	3f
	blt	2b
	b	3f
	b	1b
3:	or	1,1,1
	or	2,2,2
	or	3,3,3

globl(ftr_fixup_test7)
	or	1,1,1
BEGIN_FTR_SECTION
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	2,2,2
	or	2,2,2
FTR_SECTION_ELSE
2:	b	3f
3:	or	5,5,5
	beq	3b
	b	1f
	or	6,6,6
	b	2b
	bdnz	3b
1:
ALT_FTR_SECTION_END(0, 1)
	or	1,1,1
	or	1,1,1
globl(end_ftr_fixup_test7)
	nop

globl(ftr_fixup_test7_expected)
	or	1,1,1
2:	b	3f
3:	or	5,5,5
	beq	3b
	b	1f
	or	6,6,6
	b	2b
	bdnz	3b
1:	or	1,1,1

#if 0
/* Test that if we have a larger else case the assembler spots it and
 * reports an error. #if 0'ed so as not to break the build normally.
 */
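/* Note (added commentary): a larger else case is an error because the
 * alternative code is patched over the default section in place, so it
 * must fit within it.
 */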
ftr_fixup_test_too_big:
	or	1,1,1
BEGIN_FTR_SECTION
	or	2,2,2
	or	2,2,2
	or	2,2,2
FTR_SECTION_ELSE
	or	3,3,3
	or	3,3,3
	or	3,3,3
	or	3,3,3
ALT_FTR_SECTION_END(0, 1)
	or	1,1,1
#endif

#define MAKE_MACRO_TEST(TYPE)	\
globl(ftr_fixup_test_ ##TYPE##_macros)	\
	or	1,1,1;	\
	/* Basic test, this section should all be nop'ed */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
	or	2,2,2;	\
	or	2,2,2;	\
END_##TYPE##_SECTION(0, 1)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Basic test, this section should NOT be nop'ed */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
	or	2,2,2;	\
	or	2,2,2;	\
END_##TYPE##_SECTION(0, 0)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nesting test, inner section should be nop'ed */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
	or	2,2,2;	\
BEGIN_##TYPE##_SECTION_NESTED(80)	\
	or	3,3,3;	\
	or	3,3,3;	\
END_##TYPE##_SECTION_NESTED(0, 1, 80)	\
	or	2,2,2;	\
	or	2,2,2;	\
END_##TYPE##_SECTION(0, 0)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nesting test, whole section should be nop'ed */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
	or	2,2,2;	\
BEGIN_##TYPE##_SECTION_NESTED(80)	\
	or	3,3,3;	\
	or	3,3,3;	\
END_##TYPE##_SECTION_NESTED(0, 0, 80)	\
	or	2,2,2;	\
	or	2,2,2;	\
END_##TYPE##_SECTION(0, 1)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nesting test, none should be nop'ed */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
	or	2,2,2;	\
BEGIN_##TYPE##_SECTION_NESTED(80)	\
	or	3,3,3;	\
	or	3,3,3;	\
END_##TYPE##_SECTION_NESTED(0, 0, 80)	\
	or	2,2,2;	\
	or	2,2,2;	\
END_##TYPE##_SECTION(0, 0)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Basic alt section test, default case should be taken */	\
BEGIN_##TYPE##_SECTION	\
	or	3,3,3;	\
	or	3,3,3;	\
	or	3,3,3;	\
##TYPE##_SECTION_ELSE	\
	or	5,5,5;	\
	or	5,5,5;	\
ALT_##TYPE##_SECTION_END(0, 0)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Basic alt section test, else case should be taken */	\
BEGIN_##TYPE##_SECTION	\
	or	3,3,3;	\
	or	3,3,3;	\
	or	3,3,3;	\
##TYPE##_SECTION_ELSE	\
	or	31,31,31;	\
	or	31,31,31;	\
	or	31,31,31;	\
ALT_##TYPE##_SECTION_END(0, 1)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Alt with smaller else case, should be padded with nops */	\
BEGIN_##TYPE##_SECTION	\
	or	3,3,3;	\
	or	3,3,3;	\
	or	3,3,3;	\
##TYPE##_SECTION_ELSE	\
	or	31,31,31;	\
ALT_##TYPE##_SECTION_END(0, 1)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Alt section with nested section in default case */	\
	/* Default case should be taken, with nop'ed inner section */	\
BEGIN_##TYPE##_SECTION	\
	or	3,3,3;	\
BEGIN_##TYPE##_SECTION_NESTED(95)	\
	or	3,3,3;	\
	or	3,3,3;	\
END_##TYPE##_SECTION_NESTED(0, 1, 95)	\
	or	3,3,3;	\
##TYPE##_SECTION_ELSE	\
	or	2,2,2;	\
	or	2,2,2;	\
ALT_##TYPE##_SECTION_END(0, 0)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Alt section with nested section in else, default taken */	\
BEGIN_##TYPE##_SECTION	\
	or	3,3,3;	\
	or	3,3,3;	\
	or	3,3,3;	\
##TYPE##_SECTION_ELSE	\
	or	5,5,5;	\
BEGIN_##TYPE##_SECTION_NESTED(95)	\
	or	3,3,3;	\
END_##TYPE##_SECTION_NESTED(0, 1, 95)	\
	or	5,5,5;	\
ALT_##TYPE##_SECTION_END(0, 0)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Alt section with nested section in else, else taken & nop */	\
BEGIN_##TYPE##_SECTION	\
	or	3,3,3;	\
	or	3,3,3;	\
	or	3,3,3;	\
##TYPE##_SECTION_ELSE	\
	or	5,5,5;	\
BEGIN_##TYPE##_SECTION_NESTED(95)	\
	or	3,3,3;	\
END_##TYPE##_SECTION_NESTED(0, 1, 95)	\
	or	5,5,5;	\
ALT_##TYPE##_SECTION_END(0, 1)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Feature section with nested alt section, default taken */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
BEGIN_##TYPE##_SECTION_NESTED(95)	\
	or	1,1,1;	\
##TYPE##_SECTION_ELSE_NESTED(95)	\
	or	5,5,5;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95)	\
	or	2,2,2;	\
END_##TYPE##_SECTION(0, 0)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Feature section with nested alt section, else taken */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
BEGIN_##TYPE##_SECTION_NESTED(95)	\
	or	1,1,1;	\
##TYPE##_SECTION_ELSE_NESTED(95)	\
	or	5,5,5;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95)	\
	or	2,2,2;	\
END_##TYPE##_SECTION(0, 0)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Feature section with nested alt section, all nop'ed */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
BEGIN_##TYPE##_SECTION_NESTED(95)	\
	or	1,1,1;	\
##TYPE##_SECTION_ELSE_NESTED(95)	\
	or	5,5,5;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95)	\
	or	2,2,2;	\
END_##TYPE##_SECTION(0, 1)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nested alt sections, default with inner default taken */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
BEGIN_##TYPE##_SECTION_NESTED(95)	\
	or	1,1,1;	\
##TYPE##_SECTION_ELSE_NESTED(95)	\
	or	5,5,5;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95)	\
	or	2,2,2;	\
##TYPE##_SECTION_ELSE	\
	or	31,31,31;	\
BEGIN_##TYPE##_SECTION_NESTED(94)	\
	or	5,5,5;	\
##TYPE##_SECTION_ELSE_NESTED(94)	\
	or	1,1,1;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94)	\
	or	31,31,31;	\
ALT_##TYPE##_SECTION_END(0, 0)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nested alt sections, default with inner else taken */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
BEGIN_##TYPE##_SECTION_NESTED(95)	\
	or	1,1,1;	\
##TYPE##_SECTION_ELSE_NESTED(95)	\
	or	5,5,5;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95)	\
	or	2,2,2;	\
##TYPE##_SECTION_ELSE	\
	or	31,31,31;	\
BEGIN_##TYPE##_SECTION_NESTED(94)	\
	or	5,5,5;	\
##TYPE##_SECTION_ELSE_NESTED(94)	\
	or	1,1,1;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94)	\
	or	31,31,31;	\
ALT_##TYPE##_SECTION_END(0, 0)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nested alt sections, else with inner default taken */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
BEGIN_##TYPE##_SECTION_NESTED(95)	\
	or	1,1,1;	\
##TYPE##_SECTION_ELSE_NESTED(95)	\
	or	5,5,5;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95)	\
	or	2,2,2;	\
##TYPE##_SECTION_ELSE	\
	or	31,31,31;	\
BEGIN_##TYPE##_SECTION_NESTED(94)	\
	or	5,5,5;	\
##TYPE##_SECTION_ELSE_NESTED(94)	\
	or	1,1,1;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94)	\
	or	31,31,31;	\
ALT_##TYPE##_SECTION_END(0, 1)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nested alt sections, else with inner else taken */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
BEGIN_##TYPE##_SECTION_NESTED(95)	\
	or	1,1,1;	\
##TYPE##_SECTION_ELSE_NESTED(95)	\
	or	5,5,5;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95)	\
	or	2,2,2;	\
##TYPE##_SECTION_ELSE	\
	or	31,31,31;	\
BEGIN_##TYPE##_SECTION_NESTED(94)	\
	or	5,5,5;	\
##TYPE##_SECTION_ELSE_NESTED(94)	\
	or	1,1,1;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94)	\
	or	31,31,31;	\
ALT_##TYPE##_SECTION_END(0, 1)	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nested alt sections, else can have large else case */	\
BEGIN_##TYPE##_SECTION	\
	or	2,2,2;	\
	or	2,2,2;	\
	or	2,2,2;	\
	or	2,2,2;	\
##TYPE##_SECTION_ELSE	\
BEGIN_##TYPE##_SECTION_NESTED(94)	\
	or	5,5,5;	\
	or	5,5,5;	\
	or	5,5,5;	\
	or	5,5,5;	\
##TYPE##_SECTION_ELSE_NESTED(94)	\
	or	1,1,1;	\
	or	1,1,1;	\
	or	1,1,1;	\
	or	1,1,1;	\
ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94)	\
ALT_##TYPE##_SECTION_END(0, 1)	\
	or	1,1,1;	\
	or	1,1,1;

#define MAKE_MACRO_TEST_EXPECTED(TYPE)	\
globl(ftr_fixup_test_ ##TYPE##_macros_expected)	\
	or	1,1,1;	\
	/* Basic test, this section should all be nop'ed */	\
	/* BEGIN_##TYPE##_SECTION */	\
	nop;	\
	nop;	\
	nop;	\
	/* END_##TYPE##_SECTION(0, 1) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Basic test, this section should NOT be nop'ed */	\
	/* BEGIN_##TYPE##_SECTION */	\
	or	2,2,2;	\
	or	2,2,2;	\
	or	2,2,2;	\
	/* END_##TYPE##_SECTION(0, 0) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nesting test, inner section should be nop'ed */	\
	/* BEGIN_##TYPE##_SECTION */	\
	or	2,2,2;	\
	or	2,2,2;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(80) */	\
	nop;	\
	nop;	\
	/* END_##TYPE##_SECTION_NESTED(0, 1, 80) */	\
	or	2,2,2;	\
	or	2,2,2;	\
	/* END_##TYPE##_SECTION(0, 0) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nesting test, whole section should be nop'ed */	\
	/* NB. inner section is not nop'ed, but then entire outer is */	\
	/* BEGIN_##TYPE##_SECTION */	\
	nop;	\
	nop;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(80) */	\
	nop;	\
	nop;	\
	/* END_##TYPE##_SECTION_NESTED(0, 0, 80) */	\
	nop;	\
	nop;	\
	/* END_##TYPE##_SECTION(0, 1) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nesting test, none should be nop'ed */	\
	/* BEGIN_##TYPE##_SECTION */	\
	or	2,2,2;	\
	or	2,2,2;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(80) */	\
	or	3,3,3;	\
	or	3,3,3;	\
	/* END_##TYPE##_SECTION_NESTED(0, 0, 80) */	\
	or	2,2,2;	\
	or	2,2,2;	\
	/* END_##TYPE##_SECTION(0, 0) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Basic alt section test, default case should be taken */	\
	/* BEGIN_##TYPE##_SECTION */	\
	or	3,3,3;	\
	or	3,3,3;	\
	or	3,3,3;	\
	/* ##TYPE##_SECTION_ELSE */	\
	/* or	5,5,5; */	\
	/* or	5,5,5; */	\
	/* ALT_##TYPE##_SECTION_END(0, 0) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Basic alt section test, else case should be taken */	\
	/* BEGIN_##TYPE##_SECTION */	\
	/* or	3,3,3; */	\
	/* or	3,3,3; */	\
	/* or	3,3,3; */	\
	/* ##TYPE##_SECTION_ELSE */	\
	or	31,31,31;	\
	or	31,31,31;	\
	or	31,31,31;	\
	/* ALT_##TYPE##_SECTION_END(0, 1) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Alt with smaller else case, should be padded with nops */	\
	/* BEGIN_##TYPE##_SECTION */	\
	/* or	3,3,3; */	\
	/* or	3,3,3; */	\
	/* or	3,3,3; */	\
	/* ##TYPE##_SECTION_ELSE */	\
	or	31,31,31;	\
	nop;	\
	nop;	\
	/* ALT_##TYPE##_SECTION_END(0, 1) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Alt section with nested section in default case */	\
	/* Default case should be taken, with nop'ed inner section */	\
	/* BEGIN_##TYPE##_SECTION */	\
	or	3,3,3;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(95) */	\
	nop;	\
	nop;	\
	/* END_##TYPE##_SECTION_NESTED(0, 1, 95) */	\
	or	3,3,3;	\
	/* ##TYPE##_SECTION_ELSE */	\
	/* or	2,2,2; */	\
	/* or	2,2,2; */	\
	/* ALT_##TYPE##_SECTION_END(0, 0) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Alt section with nested section in else, default taken */	\
	/* BEGIN_##TYPE##_SECTION */	\
	or	3,3,3;	\
	or	3,3,3;	\
	or	3,3,3;	\
	/* ##TYPE##_SECTION_ELSE */	\
	/* or	5,5,5; */	\
	/* BEGIN_##TYPE##_SECTION_NESTED(95) */	\
	/* or	3,3,3; */	\
	/* END_##TYPE##_SECTION_NESTED(0, 1, 95) */	\
	/* or	5,5,5; */	\
	/* ALT_##TYPE##_SECTION_END(0, 0) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Alt section with nested section in else, else taken & nop */	\
	/* BEGIN_##TYPE##_SECTION */	\
	/* or	3,3,3; */	\
	/* or	3,3,3; */	\
	/* or	3,3,3; */	\
	/* ##TYPE##_SECTION_ELSE */	\
	or	5,5,5;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(95) */	\
	nop;	\
	/* END_##TYPE##_SECTION_NESTED(0, 1, 95) */	\
	or	5,5,5;	\
	/* ALT_##TYPE##_SECTION_END(0, 1) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Feature section with nested alt section, default taken */	\
	/* BEGIN_##TYPE##_SECTION */	\
	or	2,2,2;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(95) */	\
	or	1,1,1;	\
	/* ##TYPE##_SECTION_ELSE_NESTED(95) */	\
	/* or	5,5,5; */	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) */	\
	or	2,2,2;	\
	/* END_##TYPE##_SECTION(0, 0) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Feature section with nested alt section, else taken */	\
	/* BEGIN_##TYPE##_SECTION */	\
	or	2,2,2;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(95) */	\
	/* or	1,1,1; */	\
	/* ##TYPE##_SECTION_ELSE_NESTED(95) */	\
	or	5,5,5;	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */	\
	or	2,2,2;	\
	/* END_##TYPE##_SECTION(0, 0) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Feature section with nested alt section, all nop'ed */	\
	/* BEGIN_##TYPE##_SECTION */	\
	nop;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(95) */	\
	nop;	\
	/* ##TYPE##_SECTION_ELSE_NESTED(95) */	\
	/* or	5,5,5; */	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) */	\
	nop;	\
	/* END_##TYPE##_SECTION(0, 1) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nested alt sections, default with inner default taken */	\
	/* BEGIN_##TYPE##_SECTION */	\
	or	2,2,2;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(95) */	\
	or	1,1,1;	\
	/* ##TYPE##_SECTION_ELSE_NESTED(95) */	\
	/* or	5,5,5; */	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) */	\
	or	2,2,2;	\
	/* ##TYPE##_SECTION_ELSE */	\
	/* or	31,31,31; */	\
	/* BEGIN_##TYPE##_SECTION_NESTED(94) */	\
	/* or	5,5,5; */	\
	/* ##TYPE##_SECTION_ELSE_NESTED(94) */	\
	/* or	1,1,1; */	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) */	\
	/* or	31,31,31; */	\
	/* ALT_##TYPE##_SECTION_END(0, 0) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nested alt sections, default with inner else taken */	\
	/* BEGIN_##TYPE##_SECTION */	\
	or	2,2,2;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(95) */	\
	/* or	1,1,1; */	\
	/* ##TYPE##_SECTION_ELSE_NESTED(95) */	\
	or	5,5,5;	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */	\
	or	2,2,2;	\
	/* ##TYPE##_SECTION_ELSE */	\
	/* or	31,31,31; */	\
	/* BEGIN_##TYPE##_SECTION_NESTED(94) */	\
	/* or	5,5,5; */	\
	/* ##TYPE##_SECTION_ELSE_NESTED(94) */	\
	/* or	1,1,1; */	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) */	\
	/* or	31,31,31; */	\
	/* ALT_##TYPE##_SECTION_END(0, 0) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nested alt sections, else with inner default taken */	\
	/* BEGIN_##TYPE##_SECTION */	\
	/* or	2,2,2; */	\
	/* BEGIN_##TYPE##_SECTION_NESTED(95) */	\
	/* or	1,1,1; */	\
	/* ##TYPE##_SECTION_ELSE_NESTED(95) */	\
	/* or	5,5,5; */	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */	\
	/* or	2,2,2; */	\
	/* ##TYPE##_SECTION_ELSE */	\
	or	31,31,31;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(94) */	\
	or	5,5,5;	\
	/* ##TYPE##_SECTION_ELSE_NESTED(94) */	\
	/* or	1,1,1; */	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) */	\
	or	31,31,31;	\
	/* ALT_##TYPE##_SECTION_END(0, 1) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nested alt sections, else with inner else taken */	\
	/* BEGIN_##TYPE##_SECTION */	\
	/* or	2,2,2; */	\
	/* BEGIN_##TYPE##_SECTION_NESTED(95) */	\
	/* or	1,1,1; */	\
	/* ##TYPE##_SECTION_ELSE_NESTED(95) */	\
	/* or	5,5,5; */	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */	\
	/* or	2,2,2; */	\
	/* ##TYPE##_SECTION_ELSE */	\
	or	31,31,31;	\
	/* BEGIN_##TYPE##_SECTION_NESTED(94) */	\
	/* or	5,5,5; */	\
	/* ##TYPE##_SECTION_ELSE_NESTED(94) */	\
	or	1,1,1;	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94) */	\
	or	31,31,31;	\
	/* ALT_##TYPE##_SECTION_END(0, 1) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* Nested alt sections, else can have large else case */	\
	/* BEGIN_##TYPE##_SECTION */	\
	/* or	2,2,2; */	\
	/* or	2,2,2; */	\
	/* or	2,2,2; */	\
	/* or	2,2,2; */	\
	/* ##TYPE##_SECTION_ELSE */	\
	/* BEGIN_##TYPE##_SECTION_NESTED(94) */	\
	/* or	5,5,5; */	\
	/* or	5,5,5; */	\
	/* or	5,5,5; */	\
	/* or	5,5,5; */	\
	/* ##TYPE##_SECTION_ELSE_NESTED(94) */	\
	or	1,1,1;	\
	or	1,1,1;	\
	or	1,1,1;	\
	or	1,1,1;	\
	/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94) */	\
	/* ALT_##TYPE##_SECTION_END(0, 1) */	\
	or	1,1,1;	\
	or	1,1,1;

MAKE_MACRO_TEST(FTR);
MAKE_MACRO_TEST_EXPECTED(FTR);

#ifdef CONFIG_PPC64
MAKE_MACRO_TEST(FW_FTR);
MAKE_MACRO_TEST_EXPECTED(FW_FTR);
#endif

globl(lwsync_fixup_test)
1:	or	1,1,1
	LWSYNC
globl(end_lwsync_fixup_test)

globl(lwsync_fixup_test_expected_LWSYNC)
1:	or	1,1,1
	lwsync

globl(lwsync_fixup_test_expected_SYNC)
1:	or	1,1,1
	sync

globl(ftr_fixup_prefix1)
	or	1,1,1
	.long	OP_PREFIX << 26
	.long	0x0000000
	or	2,2,2
globl(end_ftr_fixup_prefix1)

globl(ftr_fixup_prefix1_orig)
	or	1,1,1
	.long	OP_PREFIX << 26
	.long	0x0000000
	or	2,2,2
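/* Note (added commentary): the ftr_fixup_prefix* tests build 64-bit
 * prefixed instructions by hand as two .long words (OP_PREFIX << 26
 * followed by a suffix word); the patching code must treat each 8-byte
 * pair as a single instruction.
 */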
globl(ftr_fixup_prefix1_expected)
	or	1,1,1
	nop
	nop
	or	2,2,2

globl(ftr_fixup_prefix2)
	or	1,1,1
	.long	OP_PREFIX << 26
	.long	0x0000000
	or	2,2,2
globl(end_ftr_fixup_prefix2)

globl(ftr_fixup_prefix2_orig)
	or	1,1,1
	.long	OP_PREFIX << 26
	.long	0x0000000
	or	2,2,2

globl(ftr_fixup_prefix2_alt)
	.long	OP_PREFIX << 26
	.long	0x0000001

globl(ftr_fixup_prefix2_expected)
	or	1,1,1
	.long	OP_PREFIX << 26
	.long	0x0000001
	or	2,2,2

globl(ftr_fixup_prefix3)
	or	1,1,1
	.long	OP_PREFIX << 26
	.long	0x0000000
	or	2,2,2
	or	3,3,3
globl(end_ftr_fixup_prefix3)

globl(ftr_fixup_prefix3_orig)
	or	1,1,1
	.long	OP_PREFIX << 26
	.long	0x0000000
	or	2,2,2
	or	3,3,3

globl(ftr_fixup_prefix3_alt)
	.long	OP_PREFIX << 26
	.long	0x0000001
	nop

globl(ftr_fixup_prefix3_expected)
	or	1,1,1
	.long	OP_PREFIX << 26
	.long	0x0000001
	nop
	or	3,3,3
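/* Note (added commentary): these symbols appear to be consumed by the
 * boot-time self-tests in arch/powerpc/lib/feature-fixups.c, which patch
 * each test section and compare the result with its _expected copy.
 */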
aixcc-public/challenge-001-exemplar-source
4,168
arch/powerpc/lib/copy_mc_64.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) IBM Corporation, 2011
 * Derived from copyuser_power7.s by Anton Blanchard <anton@au.ibm.com>
 * Author - Balbir Singh <bsingharora@gmail.com>
 */
#include <asm/ppc_asm.h>
#include <asm/errno.h>
#include <asm/export.h>

	.macro err1
100:
	EX_TABLE(100b,.Ldo_err1)
	.endm

	.macro err2
200:
	EX_TABLE(200b,.Ldo_err2)
	.endm

	.macro err3
300:
	EX_TABLE(300b,.Ldone)
	.endm

.Ldo_err2:
	/* Fault in the unrolled loops: restore the saved non-volatiles,
	 * then fall through and finish the copy byte by byte. */
	ld	r22,STK_REG(R22)(r1)
	ld	r21,STK_REG(R21)(r1)
	ld	r20,STK_REG(R20)(r1)
	ld	r19,STK_REG(R19)(r1)
	ld	r18,STK_REG(R18)(r1)
	ld	r17,STK_REG(R17)(r1)
	ld	r16,STK_REG(R16)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r14,STK_REG(R14)(r1)
	addi	r1,r1,STACKFRAMESIZE
.Ldo_err1:
	/* Do a byte by byte copy to get the exact remaining size */
	mtctr	r7
46:
err3;	lbz	r0,0(r4)
	addi	r4,r4,1
err3;	stb	r0,0(r3)
	addi	r3,r3,1
	bdnz	46b
	li	r3,0
	blr

.Ldone:
	/* A byte copy faulted: ctr holds the number of bytes not copied. */
	mfctr	r3
	blr


_GLOBAL(copy_mc_generic)
	mr	r7,r5
	cmpldi	r5,16
	blt	.Lshort_copy

.Lcopy:
	/* Get the source 8B aligned */
	neg	r6,r4
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-3)

	bf	cr7*4+3,1f
err1;	lbz	r0,0(r4)
	addi	r4,r4,1
err1;	stb	r0,0(r3)
	addi	r3,r3,1
	subi	r7,r7,1

1:	bf	cr7*4+2,2f
err1;	lhz	r0,0(r4)
	addi	r4,r4,2
err1;	sth	r0,0(r3)
	addi	r3,r3,2
	subi	r7,r7,2

2:	bf	cr7*4+1,3f
err1;	lwz	r0,0(r4)
	addi	r4,r4,4
err1;	stw	r0,0(r3)
	addi	r3,r3,4
	subi	r7,r7,4

3:	sub	r5,r5,r6
	cmpldi	r5,128

	mflr	r0
	stdu	r1,-STACKFRAMESIZE(r1)
	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)
	std	r17,STK_REG(R17)(r1)
	std	r18,STK_REG(R18)(r1)
	std	r19,STK_REG(R19)(r1)
	std	r20,STK_REG(R20)(r1)
	std	r21,STK_REG(R21)(r1)
	std	r22,STK_REG(R22)(r1)
	std	r0,STACKFRAMESIZE+16(r1)

	blt	5f
	srdi	r6,r5,7
	mtctr	r6

	/* Now do cacheline (128B) sized loads and stores. */
	.align	5
4:
err2;	ld	r0,0(r4)
err2;	ld	r6,8(r4)
err2;	ld	r8,16(r4)
err2;	ld	r9,24(r4)
err2;	ld	r10,32(r4)
err2;	ld	r11,40(r4)
err2;	ld	r12,48(r4)
err2;	ld	r14,56(r4)
err2;	ld	r15,64(r4)
err2;	ld	r16,72(r4)
err2;	ld	r17,80(r4)
err2;	ld	r18,88(r4)
err2;	ld	r19,96(r4)
err2;	ld	r20,104(r4)
err2;	ld	r21,112(r4)
err2;	ld	r22,120(r4)
	addi	r4,r4,128
err2;	std	r0,0(r3)
err2;	std	r6,8(r3)
err2;	std	r8,16(r3)
err2;	std	r9,24(r3)
err2;	std	r10,32(r3)
err2;	std	r11,40(r3)
err2;	std	r12,48(r3)
err2;	std	r14,56(r3)
err2;	std	r15,64(r3)
err2;	std	r16,72(r3)
err2;	std	r17,80(r3)
err2;	std	r18,88(r3)
err2;	std	r19,96(r3)
err2;	std	r20,104(r3)
err2;	std	r21,112(r3)
err2;	std	r22,120(r3)
	addi	r3,r3,128
	subi	r7,r7,128
	bdnz	4b

	clrldi	r5,r5,(64-7)

	/* Up to 127B to go */
5:	srdi	r6,r5,4
	mtocrf	0x01,r6

6:	bf	cr7*4+1,7f
err2;	ld	r0,0(r4)
err2;	ld	r6,8(r4)
err2;	ld	r8,16(r4)
err2;	ld	r9,24(r4)
err2;	ld	r10,32(r4)
err2;	ld	r11,40(r4)
err2;	ld	r12,48(r4)
err2;	ld	r14,56(r4)
	addi	r4,r4,64
err2;	std	r0,0(r3)
err2;	std	r6,8(r3)
err2;	std	r8,16(r3)
err2;	std	r9,24(r3)
err2;	std	r10,32(r3)
err2;	std	r11,40(r3)
err2;	std	r12,48(r3)
err2;	std	r14,56(r3)
	addi	r3,r3,64
	subi	r7,r7,64

7:	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	ld	r17,STK_REG(R17)(r1)
	ld	r18,STK_REG(R18)(r1)
	ld	r19,STK_REG(R19)(r1)
	ld	r20,STK_REG(R20)(r1)
	ld	r21,STK_REG(R21)(r1)
	ld	r22,STK_REG(R22)(r1)
	addi	r1,r1,STACKFRAMESIZE

	/* Up to 63B to go */
	bf	cr7*4+2,8f
err1;	ld	r0,0(r4)
err1;	ld	r6,8(r4)
err1;	ld	r8,16(r4)
err1;	ld	r9,24(r4)
	addi	r4,r4,32
err1;	std	r0,0(r3)
err1;	std	r6,8(r3)
err1;	std	r8,16(r3)
err1;	std	r9,24(r3)
	addi	r3,r3,32
	subi	r7,r7,32

	/* Up to 31B to go */
8:	bf	cr7*4+3,9f
err1;	ld	r0,0(r4)
err1;	ld	r6,8(r4)
	addi	r4,r4,16
err1;	std	r0,0(r3)
err1;	std	r6,8(r3)
	addi	r3,r3,16
	subi	r7,r7,16

9:	clrldi	r5,r5,(64-4)

	/* Up to 15B to go */
.Lshort_copy:
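	/*
	 * Note (added commentary): mtocrf 0x01,r5 copies the low four
	 * bits of the remaining length into cr7, so the bf tests below
	 * peel off 8-, 4-, 2- and 1-byte tails in turn.
	 */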
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
err1;	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
err1;	lwz	r6,4(r4)
	addi	r4,r4,8
err1;	stw	r0,0(r3)
err1;	stw	r6,4(r3)
	addi	r3,r3,8
	subi	r7,r7,8

12:	bf	cr7*4+1,13f
err1;	lwz	r0,0(r4)
	addi	r4,r4,4
err1;	stw	r0,0(r3)
	addi	r3,r3,4
	subi	r7,r7,4

13:	bf	cr7*4+2,14f
err1;	lhz	r0,0(r4)
	addi	r4,r4,2
err1;	sth	r0,0(r3)
	addi	r3,r3,2
	subi	r7,r7,2

14:	bf	cr7*4+3,15f
err1;	lbz	r0,0(r4)
err1;	stb	r0,0(r3)

15:	li	r3,0
	blr

EXPORT_SYMBOL_GPL(copy_mc_generic);
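/*
 * Note (added commentary): copy_mc_generic takes dest in r3, src in r4
 * and the byte count in r5, and returns 0 in r3 on success.  On a fault
 * it falls back to a byte-by-byte copy and returns the number of bytes
 * left uncopied.
 */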