Dataset schema (column : type : observed range):
  repo_id   : string : length 5 to 115
  size      : int64  : 590 to 5.01M
  file_path : string : length 4 to 212
  content   : string : length 590 to 5.01M
aixcc-public/challenge-001-exemplar-source
1,867
arch/x86/lib/copy_page_64.S
/* SPDX-License-Identifier: GPL-2.0 */
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */

#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/export.h>

/*
 * Some CPUs run faster using the string copy instructions (sane microcode).
 * It is also a lot simpler. Use this when possible. But, don't use streaming
 * copy unless the CPU indicates X86_FEATURE_REP_GOOD. Could vary the
 * prefetch distance based on SMP/UP.
 */
	ALIGN
SYM_FUNC_START(copy_page)
	ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
	movl	$4096/8, %ecx
	rep	movsq
	RET
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)

SYM_FUNC_START_LOCAL(copy_page_regs)
	subq	$2*8, %rsp
	movq	%rbx, (%rsp)
	movq	%r12, 1*8(%rsp)

	movl	$(4096/64)-5, %ecx
	.p2align 4
.Loop64:
	dec	%rcx
	movq	0x8*0(%rsi), %rax
	movq	0x8*1(%rsi), %rbx
	movq	0x8*2(%rsi), %rdx
	movq	0x8*3(%rsi), %r8
	movq	0x8*4(%rsi), %r9
	movq	0x8*5(%rsi), %r10
	movq	0x8*6(%rsi), %r11
	movq	0x8*7(%rsi), %r12

	prefetcht0 5*64(%rsi)

	movq	%rax, 0x8*0(%rdi)
	movq	%rbx, 0x8*1(%rdi)
	movq	%rdx, 0x8*2(%rdi)
	movq	%r8,  0x8*3(%rdi)
	movq	%r9,  0x8*4(%rdi)
	movq	%r10, 0x8*5(%rdi)
	movq	%r11, 0x8*6(%rdi)
	movq	%r12, 0x8*7(%rdi)

	leaq	64 (%rsi), %rsi
	leaq	64 (%rdi), %rdi

	jnz	.Loop64

	movl	$5, %ecx
	.p2align 4
.Loop2:
	decl	%ecx

	movq	0x8*0(%rsi), %rax
	movq	0x8*1(%rsi), %rbx
	movq	0x8*2(%rsi), %rdx
	movq	0x8*3(%rsi), %r8
	movq	0x8*4(%rsi), %r9
	movq	0x8*5(%rsi), %r10
	movq	0x8*6(%rsi), %r11
	movq	0x8*7(%rsi), %r12

	movq	%rax, 0x8*0(%rdi)
	movq	%rbx, 0x8*1(%rdi)
	movq	%rdx, 0x8*2(%rdi)
	movq	%r8,  0x8*3(%rdi)
	movq	%r9,  0x8*4(%rdi)
	movq	%r10, 0x8*5(%rdi)
	movq	%r11, 0x8*6(%rdi)
	movq	%r12, 0x8*7(%rdi)

	leaq	64(%rdi), %rdi
	leaq	64(%rsi), %rsi

	jnz	.Loop2

	movq	(%rsp), %rbx
	movq	1*8(%rsp), %r12
	addq	$2*8, %rsp
	RET
SYM_FUNC_END(copy_page_regs)
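As a rough illustration of what the copy_page_regs fallback above does, here is a hedged C sketch of a 4 KiB page copy unrolled into 64-byte chunks with a software prefetch a few cache lines ahead. The names copy_page_c and PAGE_SIZE_BYTES are invented for this sketch; it also glosses over the detail that the assembly stops prefetching for the last five chunks.

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE_BYTES 4096	/* one x86 page, as in the asm above */

/* Hypothetical C analogue of copy_page_regs: copy a page in 64-byte
 * chunks, prefetching a few cache lines ahead of the read pointer. */
static void copy_page_c(void *dst, const void *src)
{
	uint64_t *d = dst;
	const uint64_t *s = src;

	for (size_t chunk = 0; chunk < PAGE_SIZE_BYTES / 64; chunk++) {
		__builtin_prefetch((const char *)s + 5 * 64);	/* ~5 lines ahead */
		for (int q = 0; q < 8; q++)	/* 8 quadwords = 64 bytes */
			d[q] = s[q];
		d += 8;
		s += 8;
	}
}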
aixcc-public/challenge-001-exemplar-source
2,673
arch/x86/lib/atomic64_cx8_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * atomic64_t for 586+ * * Copyright © 2010 Luca Barbieri */ #include <linux/linkage.h> #include <asm/alternative.h> .macro read64 reg movl %ebx, %eax movl %ecx, %edx /* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */ LOCK_PREFIX cmpxchg8b (\reg) .endm SYM_FUNC_START(atomic64_read_cx8) read64 %ecx RET SYM_FUNC_END(atomic64_read_cx8) SYM_FUNC_START(atomic64_set_cx8) 1: /* we don't need LOCK_PREFIX since aligned 64-bit writes * are atomic on 586 and newer */ cmpxchg8b (%esi) jne 1b RET SYM_FUNC_END(atomic64_set_cx8) SYM_FUNC_START(atomic64_xchg_cx8) 1: LOCK_PREFIX cmpxchg8b (%esi) jne 1b RET SYM_FUNC_END(atomic64_xchg_cx8) .macro addsub_return func ins insc SYM_FUNC_START(atomic64_\func\()_return_cx8) pushl %ebp pushl %ebx pushl %esi pushl %edi movl %eax, %esi movl %edx, %edi movl %ecx, %ebp read64 %ecx 1: movl %eax, %ebx movl %edx, %ecx \ins\()l %esi, %ebx \insc\()l %edi, %ecx LOCK_PREFIX cmpxchg8b (%ebp) jne 1b 10: movl %ebx, %eax movl %ecx, %edx popl %edi popl %esi popl %ebx popl %ebp RET SYM_FUNC_END(atomic64_\func\()_return_cx8) .endm addsub_return add add adc addsub_return sub sub sbb .macro incdec_return func ins insc SYM_FUNC_START(atomic64_\func\()_return_cx8) pushl %ebx read64 %esi 1: movl %eax, %ebx movl %edx, %ecx \ins\()l $1, %ebx \insc\()l $0, %ecx LOCK_PREFIX cmpxchg8b (%esi) jne 1b 10: movl %ebx, %eax movl %ecx, %edx popl %ebx RET SYM_FUNC_END(atomic64_\func\()_return_cx8) .endm incdec_return inc add adc incdec_return dec sub sbb SYM_FUNC_START(atomic64_dec_if_positive_cx8) pushl %ebx read64 %esi 1: movl %eax, %ebx movl %edx, %ecx subl $1, %ebx sbb $0, %ecx js 2f LOCK_PREFIX cmpxchg8b (%esi) jne 1b 2: movl %ebx, %eax movl %ecx, %edx popl %ebx RET SYM_FUNC_END(atomic64_dec_if_positive_cx8) SYM_FUNC_START(atomic64_add_unless_cx8) pushl %ebp pushl %ebx /* these just push these two parameters on the stack */ pushl %edi pushl %ecx movl %eax, %ebp movl %edx, %edi read64 %esi 1: cmpl %eax, 0(%esp) je 4f 2: movl %eax, %ebx movl %edx, %ecx addl %ebp, %ebx adcl %edi, %ecx LOCK_PREFIX cmpxchg8b (%esi) jne 1b movl $1, %eax 3: addl $8, %esp popl %ebx popl %ebp RET 4: cmpl %edx, 4(%esp) jne 2b xorl %eax, %eax jmp 3b SYM_FUNC_END(atomic64_add_unless_cx8) SYM_FUNC_START(atomic64_inc_not_zero_cx8) pushl %ebx read64 %esi 1: movl %eax, %ecx orl %edx, %ecx jz 3f movl %eax, %ebx xorl %ecx, %ecx addl $1, %ebx adcl %edx, %ecx LOCK_PREFIX cmpxchg8b (%esi) jne 1b movl $1, %eax 3: popl %ebx RET SYM_FUNC_END(atomic64_inc_not_zero_cx8)
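The functions above all share one pattern: read the current 64-bit value, compute the new value in registers, and publish it with lock cmpxchg8b, retrying if another CPU raced in between. A minimal C11 sketch of that retry loop follows; atomic64_add_return_sketch is a made-up name for illustration, not the kernel API.

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical C analogue of the addsub_return pattern: read the old
 * 64-bit value, compute old + delta, and retry the compare-exchange
 * until no other CPU changed the variable in between. */
static int64_t atomic64_add_return_sketch(int64_t delta, _Atomic int64_t *v)
{
	int64_t old = atomic_load(v);
	int64_t new;

	do {
		new = old + delta;
		/* on failure, 'old' is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(v, &old, new));

	return new;
}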
aixcc-public/challenge-001-exemplar-source
3,716
arch/x86/lib/memmove_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Normally compiler builtins are used, but sometimes the compiler calls out * of line code. Based on asm-i386/string.h. * * This assembly file is re-written from memmove_64.c file. * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> */ #include <linux/linkage.h> #include <asm/cpufeatures.h> #include <asm/alternative.h> #include <asm/export.h> #undef memmove /* * Implement memmove(). This can handle overlap between src and dst. * * Input: * rdi: dest * rsi: src * rdx: count * * Output: * rax: dest */ SYM_FUNC_START(__memmove) mov %rdi, %rax /* Decide forward/backward copy mode */ cmp %rdi, %rsi jge .Lmemmove_begin_forward mov %rsi, %r8 add %rdx, %r8 cmp %rdi, %r8 jg 2f /* FSRM implies ERMS => no length checks, do the copy directly */ .Lmemmove_begin_forward: ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS /* * movsq instruction have many startup latency * so we handle small size by general register. */ cmp $680, %rdx jb 3f /* * movsq instruction is only good for aligned case. */ cmpb %dil, %sil je 4f 3: sub $0x20, %rdx /* * We gobble 32 bytes forward in each loop. */ 5: sub $0x20, %rdx movq 0*8(%rsi), %r11 movq 1*8(%rsi), %r10 movq 2*8(%rsi), %r9 movq 3*8(%rsi), %r8 leaq 4*8(%rsi), %rsi movq %r11, 0*8(%rdi) movq %r10, 1*8(%rdi) movq %r9, 2*8(%rdi) movq %r8, 3*8(%rdi) leaq 4*8(%rdi), %rdi jae 5b addq $0x20, %rdx jmp 1f /* * Handle data forward by movsq. */ .p2align 4 4: movq %rdx, %rcx movq -8(%rsi, %rdx), %r11 lea -8(%rdi, %rdx), %r10 shrq $3, %rcx rep movsq movq %r11, (%r10) jmp 13f .Lmemmove_end_forward: /* * Handle data backward by movsq. */ .p2align 4 7: movq %rdx, %rcx movq (%rsi), %r11 movq %rdi, %r10 leaq -8(%rsi, %rdx), %rsi leaq -8(%rdi, %rdx), %rdi shrq $3, %rcx std rep movsq cld movq %r11, (%r10) jmp 13f /* * Start to prepare for backward copy. */ .p2align 4 2: cmp $0x20, %rdx jb 1f cmp $680, %rdx jb 6f cmp %dil, %sil je 7b 6: /* * Calculate copy position to tail. */ addq %rdx, %rsi addq %rdx, %rdi subq $0x20, %rdx /* * We gobble 32 bytes backward in each loop. */ 8: subq $0x20, %rdx movq -1*8(%rsi), %r11 movq -2*8(%rsi), %r10 movq -3*8(%rsi), %r9 movq -4*8(%rsi), %r8 leaq -4*8(%rsi), %rsi movq %r11, -1*8(%rdi) movq %r10, -2*8(%rdi) movq %r9, -3*8(%rdi) movq %r8, -4*8(%rdi) leaq -4*8(%rdi), %rdi jae 8b /* * Calculate copy position to head. */ addq $0x20, %rdx subq %rdx, %rsi subq %rdx, %rdi 1: cmpq $16, %rdx jb 9f /* * Move data from 16 bytes to 31 bytes. */ movq 0*8(%rsi), %r11 movq 1*8(%rsi), %r10 movq -2*8(%rsi, %rdx), %r9 movq -1*8(%rsi, %rdx), %r8 movq %r11, 0*8(%rdi) movq %r10, 1*8(%rdi) movq %r9, -2*8(%rdi, %rdx) movq %r8, -1*8(%rdi, %rdx) jmp 13f .p2align 4 9: cmpq $8, %rdx jb 10f /* * Move data from 8 bytes to 15 bytes. */ movq 0*8(%rsi), %r11 movq -1*8(%rsi, %rdx), %r10 movq %r11, 0*8(%rdi) movq %r10, -1*8(%rdi, %rdx) jmp 13f 10: cmpq $4, %rdx jb 11f /* * Move data from 4 bytes to 7 bytes. */ movl (%rsi), %r11d movl -4(%rsi, %rdx), %r10d movl %r11d, (%rdi) movl %r10d, -4(%rdi, %rdx) jmp 13f 11: cmp $2, %rdx jb 12f /* * Move data from 2 bytes to 3 bytes. */ movw (%rsi), %r11w movw -2(%rsi, %rdx), %r10w movw %r11w, (%rdi) movw %r10w, -2(%rdi, %rdx) jmp 13f 12: cmp $1, %rdx jb 13f /* * Move data for 1 byte. */ movb (%rsi), %r11b movb %r11b, (%rdi) 13: RET .Lmemmove_erms: movq %rdx, %rcx rep movsb RET SYM_FUNC_END(__memmove) EXPORT_SYMBOL(__memmove) SYM_FUNC_ALIAS_WEAK(memmove, __memmove) EXPORT_SYMBOL(memmove)
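The core decision in __memmove above is the copy direction: copy forward unless the source lies below an overlapping destination, in which case copy backward so no byte is overwritten before it has been read. A minimal C sketch of that decision (illustrative only; the pointer comparisons are the conventional memmove idiom, not the kernel's code):

#include <stddef.h>

/* Hypothetical sketch of the direction choice __memmove makes above. */
static void *memmove_sketch(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (d <= s || d >= s + n) {
		for (size_t i = 0; i < n; i++)	/* no harmful overlap: forward */
			d[i] = s[i];
	} else {
		for (size_t i = n; i-- > 0; )	/* src below dst and overlapping: backward */
			d[i] = s[i];
	}
	return dst;
}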
aixcc-public/challenge-001-exemplar-source
3,798
arch/x86/lib/copy_mc_64.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */ #include <linux/linkage.h> #include <asm/asm.h> #ifndef CONFIG_UML #ifdef CONFIG_X86_MCE /* * copy_mc_fragile - copy memory with indication if an exception / fault happened * * The 'fragile' version is opted into by platform quirks and takes * pains to avoid unrecoverable corner cases like 'fast-string' * instruction sequences, and consuming poison across a cacheline * boundary. The non-fragile version is equivalent to memcpy() * regardless of CPU machine-check-recovery capability. */ SYM_FUNC_START(copy_mc_fragile) cmpl $8, %edx /* Less than 8 bytes? Go to byte copy loop */ jb .L_no_whole_words /* Check for bad alignment of source */ testl $7, %esi /* Already aligned */ jz .L_8byte_aligned /* Copy one byte at a time until source is 8-byte aligned */ movl %esi, %ecx andl $7, %ecx subl $8, %ecx negl %ecx subl %ecx, %edx .L_read_leading_bytes: movb (%rsi), %al .L_write_leading_bytes: movb %al, (%rdi) incq %rsi incq %rdi decl %ecx jnz .L_read_leading_bytes .L_8byte_aligned: movl %edx, %ecx andl $7, %edx shrl $3, %ecx jz .L_no_whole_words .L_read_words: movq (%rsi), %r8 .L_write_words: movq %r8, (%rdi) addq $8, %rsi addq $8, %rdi decl %ecx jnz .L_read_words /* Any trailing bytes? */ .L_no_whole_words: andl %edx, %edx jz .L_done_memcpy_trap /* Copy trailing bytes */ movl %edx, %ecx .L_read_trailing_bytes: movb (%rsi), %al .L_write_trailing_bytes: movb %al, (%rdi) incq %rsi incq %rdi decl %ecx jnz .L_read_trailing_bytes /* Copy successful. Return zero */ .L_done_memcpy_trap: xorl %eax, %eax .L_done: RET /* * Return number of bytes not copied for any failure. Note that * there is no "tail" handling since the source buffer is 8-byte * aligned and poison is cacheline aligned. */ .E_read_words: shll $3, %ecx .E_leading_bytes: addl %edx, %ecx .E_trailing_bytes: mov %ecx, %eax jmp .L_done /* * For write fault handling, given the destination is unaligned, * we handle faults on multi-byte writes with a byte-by-byte * copy up to the write-protected page. */ .E_write_words: shll $3, %ecx addl %edx, %ecx movl %ecx, %edx jmp copy_mc_fragile_handle_tail _ASM_EXTABLE_TYPE(.L_read_leading_bytes, .E_leading_bytes, EX_TYPE_DEFAULT_MCE_SAFE) _ASM_EXTABLE_TYPE(.L_read_words, .E_read_words, EX_TYPE_DEFAULT_MCE_SAFE) _ASM_EXTABLE_TYPE(.L_read_trailing_bytes, .E_trailing_bytes, EX_TYPE_DEFAULT_MCE_SAFE) _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes) _ASM_EXTABLE(.L_write_words, .E_write_words) _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes) SYM_FUNC_END(copy_mc_fragile) #endif /* CONFIG_X86_MCE */ /* * copy_mc_enhanced_fast_string - memory copy with exception handling * * Fast string copy + fault / exception handling. If the CPU does * support machine check exception recovery, but does not support * recovering from fast-string exceptions then this CPU needs to be * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any * machine check recovery support this version should be no slower than * standard memcpy. */ SYM_FUNC_START(copy_mc_enhanced_fast_string) movq %rdi, %rax movq %rdx, %rcx .L_copy: rep movsb /* Copy successful. Return zero */ xorl %eax, %eax RET .E_copy: /* * On fault %rcx is updated such that the copy instruction could * optionally be restarted at the fault position, i.e. it * contains 'bytes remaining'. A non-zero return indicates error * to copy_mc_generic() users, or indicate short transfers to * user-copy routines. 
*/ movq %rcx, %rax RET _ASM_EXTABLE_TYPE(.L_copy, .E_copy, EX_TYPE_DEFAULT_MCE_SAFE) SYM_FUNC_END(copy_mc_enhanced_fast_string) #endif /* !CONFIG_UML */
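copy_mc_fragile above avoids fast-string instructions and never lets a single read straddle an 8-byte boundary of the possibly poisoned source: it byte-copies up to alignment, then moves whole 8-byte words, then the tail. A hedged C sketch of that split, assuming nothing beyond standard C (copy_mc_split_sketch is a made-up name and omits the fault/return-value handling):

#include <stddef.h>
#include <stdint.h>

static void copy_mc_split_sketch(unsigned char *dst, const unsigned char *src,
				 size_t len)
{
	size_t lead = 0;

	if (len >= 8 && ((uintptr_t)src & 7))
		lead = 8 - ((uintptr_t)src & 7);	/* bytes until src is 8-byte aligned */

	for (size_t i = 0; i < lead; i++)		/* leading bytes */
		*dst++ = *src++;
	len -= lead;

	for (size_t w = 0; w < len / 8; w++) {		/* aligned 8-byte words */
		*(uint64_t *)dst = *(const uint64_t *)src;
		dst += 8;
		src += 8;
	}

	for (size_t i = 0; i < (len & 7); i++)		/* trailing bytes */
		*dst++ = *src++;
}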
aixcc-public/challenge-001-exemplar-source
4,620
arch/x86/lib/getuser.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * __get_user functions. * * (C) Copyright 1998 Linus Torvalds * (C) Copyright 2005 Andi Kleen * (C) Copyright 2008 Glauber Costa * * These functions have a non-standard call interface * to make them more efficient, especially as they * return an error value in addition to the "real" * return value. */ /* * __get_user_X * * Inputs: %[r|e]ax contains the address. * * Outputs: %[r|e]ax is error code (0 or -EFAULT) * %[r|e]dx contains zero-extended value * %ecx contains the high half for 32-bit __get_user_8 * * * These functions should not modify any other registers, * as they get called from within inline assembly. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> #define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC #ifdef CONFIG_X86_5LEVEL #define LOAD_TASK_SIZE_MINUS_N(n) \ ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rdx), \ __stringify(mov $((1 << 56) - 4096 - (n)),%rdx), X86_FEATURE_LA57 #else #define LOAD_TASK_SIZE_MINUS_N(n) \ mov $(TASK_SIZE_MAX - (n)),%_ASM_DX #endif .text SYM_FUNC_START(__get_user_1) LOAD_TASK_SIZE_MINUS_N(0) cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC 1: movzbl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC RET SYM_FUNC_END(__get_user_1) EXPORT_SYMBOL(__get_user_1) SYM_FUNC_START(__get_user_2) LOAD_TASK_SIZE_MINUS_N(1) cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC 2: movzwl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC RET SYM_FUNC_END(__get_user_2) EXPORT_SYMBOL(__get_user_2) SYM_FUNC_START(__get_user_4) LOAD_TASK_SIZE_MINUS_N(3) cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC 3: movl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC RET SYM_FUNC_END(__get_user_4) EXPORT_SYMBOL(__get_user_4) SYM_FUNC_START(__get_user_8) #ifdef CONFIG_X86_64 LOAD_TASK_SIZE_MINUS_N(7) cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC 4: movq (%_ASM_AX),%rdx xor %eax,%eax ASM_CLAC RET #else LOAD_TASK_SIZE_MINUS_N(7) cmp %_ASM_DX,%_ASM_AX jae bad_get_user_8 sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC 4: movl (%_ASM_AX),%edx 5: movl 4(%_ASM_AX),%ecx xor %eax,%eax ASM_CLAC RET #endif SYM_FUNC_END(__get_user_8) EXPORT_SYMBOL(__get_user_8) /* .. 
and the same for __get_user, just without the range checks */ SYM_FUNC_START(__get_user_nocheck_1) ASM_STAC ASM_BARRIER_NOSPEC 6: movzbl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC RET SYM_FUNC_END(__get_user_nocheck_1) EXPORT_SYMBOL(__get_user_nocheck_1) SYM_FUNC_START(__get_user_nocheck_2) ASM_STAC ASM_BARRIER_NOSPEC 7: movzwl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC RET SYM_FUNC_END(__get_user_nocheck_2) EXPORT_SYMBOL(__get_user_nocheck_2) SYM_FUNC_START(__get_user_nocheck_4) ASM_STAC ASM_BARRIER_NOSPEC 8: movl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC RET SYM_FUNC_END(__get_user_nocheck_4) EXPORT_SYMBOL(__get_user_nocheck_4) SYM_FUNC_START(__get_user_nocheck_8) ASM_STAC ASM_BARRIER_NOSPEC #ifdef CONFIG_X86_64 9: movq (%_ASM_AX),%rdx #else 9: movl (%_ASM_AX),%edx 10: movl 4(%_ASM_AX),%ecx #endif xor %eax,%eax ASM_CLAC RET SYM_FUNC_END(__get_user_nocheck_8) EXPORT_SYMBOL(__get_user_nocheck_8) SYM_CODE_START_LOCAL(.Lbad_get_user_clac) ASM_CLAC bad_get_user: xor %edx,%edx mov $(-EFAULT),%_ASM_AX RET SYM_CODE_END(.Lbad_get_user_clac) #ifdef CONFIG_X86_32 SYM_CODE_START_LOCAL(.Lbad_get_user_8_clac) ASM_CLAC bad_get_user_8: xor %edx,%edx xor %ecx,%ecx mov $(-EFAULT),%_ASM_AX RET SYM_CODE_END(.Lbad_get_user_8_clac) #endif /* get_user */ _ASM_EXTABLE_UA(1b, .Lbad_get_user_clac) _ASM_EXTABLE_UA(2b, .Lbad_get_user_clac) _ASM_EXTABLE_UA(3b, .Lbad_get_user_clac) #ifdef CONFIG_X86_64 _ASM_EXTABLE_UA(4b, .Lbad_get_user_clac) #else _ASM_EXTABLE_UA(4b, .Lbad_get_user_8_clac) _ASM_EXTABLE_UA(5b, .Lbad_get_user_8_clac) #endif /* __get_user */ _ASM_EXTABLE_UA(6b, .Lbad_get_user_clac) _ASM_EXTABLE_UA(7b, .Lbad_get_user_clac) _ASM_EXTABLE_UA(8b, .Lbad_get_user_clac) #ifdef CONFIG_X86_64 _ASM_EXTABLE_UA(9b, .Lbad_get_user_clac) #else _ASM_EXTABLE_UA(9b, .Lbad_get_user_8_clac) _ASM_EXTABLE_UA(10b, .Lbad_get_user_8_clac) #endif
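The sbb/and pair after each "cmp ...; jae bad_get_user" above is the array_index_mask_nospec() trick: the same comparison that drives the branch also yields an all-ones or all-zeroes mask, so even a mispredicted speculative path dereferences a clamped (zeroed) address rather than a kernel one. A hedged C sketch of the idea (the real kernel computes the mask branchlessly; this version is illustrative only):

#include <stdint.h>

static inline uintptr_t mask_nospec_sketch(uintptr_t addr, uintptr_t limit)
{
	/* all ones when addr < limit, all zeroes otherwise;
	 * the asm derives the same mask from sbb after the cmp */
	uintptr_t mask = (addr < limit) ? ~(uintptr_t)0 : 0;

	return addr & mask;	/* out-of-range addresses collapse to 0 */
}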
aixcc-public/challenge-001-exemplar-source
2,774
arch/x86/lib/putuser.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * __put_user functions. * * (C) Copyright 2005 Linus Torvalds * (C) Copyright 2005 Andi Kleen * (C) Copyright 2008 Glauber Costa * * These functions have a non-standard call interface * to make them more efficient, especially as they * return an error value in addition to the "real" * return value. */ #include <linux/linkage.h> #include <asm/thread_info.h> #include <asm/errno.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> /* * __put_user_X * * Inputs: %eax[:%edx] contains the data * %ecx contains the address * * Outputs: %ecx is error code (0 or -EFAULT) * * Clobbers: %ebx needed for task pointer * * These functions should not modify any other registers, * as they get called from within inline assembly. */ #ifdef CONFIG_X86_5LEVEL #define LOAD_TASK_SIZE_MINUS_N(n) \ ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rbx), \ __stringify(mov $((1 << 56) - 4096 - (n)),%rbx), X86_FEATURE_LA57 #else #define LOAD_TASK_SIZE_MINUS_N(n) \ mov $(TASK_SIZE_MAX - (n)),%_ASM_BX #endif .text SYM_FUNC_START(__put_user_1) LOAD_TASK_SIZE_MINUS_N(0) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL) ENDBR ASM_STAC 1: movb %al,(%_ASM_CX) xor %ecx,%ecx ASM_CLAC RET SYM_FUNC_END(__put_user_1) EXPORT_SYMBOL(__put_user_1) EXPORT_SYMBOL(__put_user_nocheck_1) SYM_FUNC_START(__put_user_2) LOAD_TASK_SIZE_MINUS_N(1) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL) ENDBR ASM_STAC 2: movw %ax,(%_ASM_CX) xor %ecx,%ecx ASM_CLAC RET SYM_FUNC_END(__put_user_2) EXPORT_SYMBOL(__put_user_2) EXPORT_SYMBOL(__put_user_nocheck_2) SYM_FUNC_START(__put_user_4) LOAD_TASK_SIZE_MINUS_N(3) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL) ENDBR ASM_STAC 3: movl %eax,(%_ASM_CX) xor %ecx,%ecx ASM_CLAC RET SYM_FUNC_END(__put_user_4) EXPORT_SYMBOL(__put_user_4) EXPORT_SYMBOL(__put_user_nocheck_4) SYM_FUNC_START(__put_user_8) LOAD_TASK_SIZE_MINUS_N(7) cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL) ENDBR ASM_STAC 4: mov %_ASM_AX,(%_ASM_CX) #ifdef CONFIG_X86_32 5: movl %edx,4(%_ASM_CX) #endif xor %ecx,%ecx ASM_CLAC RET SYM_FUNC_END(__put_user_8) EXPORT_SYMBOL(__put_user_8) EXPORT_SYMBOL(__put_user_nocheck_8) SYM_CODE_START_LOCAL(.Lbad_put_user_clac) ASM_CLAC .Lbad_put_user: movl $-EFAULT,%ecx RET SYM_CODE_END(.Lbad_put_user_clac) _ASM_EXTABLE_UA(1b, .Lbad_put_user_clac) _ASM_EXTABLE_UA(2b, .Lbad_put_user_clac) _ASM_EXTABLE_UA(3b, .Lbad_put_user_clac) _ASM_EXTABLE_UA(4b, .Lbad_put_user_clac) #ifdef CONFIG_X86_32 _ASM_EXTABLE_UA(5b, .Lbad_put_user_clac) #endif
aixcc-public/challenge-001-exemplar-source
1,984
arch/x86/purgatory/entry64.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2003,2004 Eric Biederman (ebiederm@xmission.com) * Copyright (C) 2014 Red Hat Inc. * Author(s): Vivek Goyal <vgoyal@redhat.com> * * This code has been taken from kexec-tools. */ #include <linux/linkage.h> .text .balign 16 .code64 SYM_CODE_START(entry64) /* Setup a gdt that should be preserved */ lgdt gdt(%rip) /* load the data segments */ movl $0x18, %eax /* data segment */ movl %eax, %ds movl %eax, %es movl %eax, %ss movl %eax, %fs movl %eax, %gs /* Setup new stack */ leaq stack_init(%rip), %rsp pushq $0x10 /* CS */ leaq new_cs_exit(%rip), %rax pushq %rax lretq new_cs_exit: /* Load the registers */ movq rax(%rip), %rax movq rbx(%rip), %rbx movq rcx(%rip), %rcx movq rdx(%rip), %rdx movq rsi(%rip), %rsi movq rdi(%rip), %rdi movq rsp(%rip), %rsp movq rbp(%rip), %rbp movq r8(%rip), %r8 movq r9(%rip), %r9 movq r10(%rip), %r10 movq r11(%rip), %r11 movq r12(%rip), %r12 movq r13(%rip), %r13 movq r14(%rip), %r14 movq r15(%rip), %r15 /* Jump to the new code... */ jmpq *rip(%rip) SYM_CODE_END(entry64) .section ".rodata" .balign 4 SYM_DATA_START(entry64_regs) rax: .quad 0x0 rcx: .quad 0x0 rdx: .quad 0x0 rbx: .quad 0x0 rsp: .quad 0x0 rbp: .quad 0x0 rsi: .quad 0x0 rdi: .quad 0x0 r8: .quad 0x0 r9: .quad 0x0 r10: .quad 0x0 r11: .quad 0x0 r12: .quad 0x0 r13: .quad 0x0 r14: .quad 0x0 r15: .quad 0x0 rip: .quad 0x0 SYM_DATA_END(entry64_regs) /* GDT */ .section ".rodata" .balign 16 SYM_DATA_START_LOCAL(gdt) /* * 0x00 unusable segment * 0x08 unused * so use them as gdt ptr */ .word gdt_end - gdt - 1 .quad gdt .word 0, 0, 0 /* 0x10 4GB flat code segment */ .word 0xFFFF, 0x0000, 0x9A00, 0x00AF /* 0x18 4GB flat data segment */ .word 0xFFFF, 0x0000, 0x9200, 0x00CF SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end) SYM_DATA_START_LOCAL(stack) .quad 0, 0 SYM_DATA_END_LABEL(stack, SYM_L_LOCAL, stack_init)
aixcc-public/challenge-001-exemplar-source
1,198
arch/x86/purgatory/setup-x86_64.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * purgatory: setup code * * Copyright (C) 2003,2004 Eric Biederman (ebiederm@xmission.com) * Copyright (C) 2014 Red Hat Inc. * * This code has been taken from kexec-tools. */ #include <linux/linkage.h> #include <asm/purgatory.h> .text .balign 16 .code64 SYM_CODE_START(purgatory_start) /* Load a gdt so I know what the segment registers are */ lgdt gdt(%rip) /* load the data segments */ movl $0x18, %eax /* data segment */ movl %eax, %ds movl %eax, %es movl %eax, %ss movl %eax, %fs movl %eax, %gs /* Setup a stack */ leaq lstack_end(%rip), %rsp /* Call the C code */ call purgatory jmp entry64 SYM_CODE_END(purgatory_start) .section ".rodata" .balign 16 SYM_DATA_START_LOCAL(gdt) /* 0x00 unusable segment * 0x08 unused * so use them as the gdt ptr */ .word gdt_end - gdt - 1 .quad gdt .word 0, 0, 0 /* 0x10 4GB flat code segment */ .word 0xFFFF, 0x0000, 0x9A00, 0x00AF /* 0x18 4GB flat data segment */ .word 0xFFFF, 0x0000, 0x9200, 0x00CF SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end) .bss .balign 4096 SYM_DATA_START_LOCAL(lstack) .skip 4096 SYM_DATA_END_LABEL(lstack, SYM_L_LOCAL, lstack_end)
aixcc-public/challenge-001-exemplar-source
1,424
arch/x86/boot/bioscall.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* ----------------------------------------------------------------------- * * Copyright 2009-2014 Intel Corporation; author H. Peter Anvin * * ----------------------------------------------------------------------- */ /* * "Glove box" for BIOS calls. Avoids the constant problems with BIOSes * touching registers they shouldn't be. */ .code16 .section ".inittext","ax" .globl intcall .type intcall, @function intcall: /* Self-modify the INT instruction. Ugly, but works. */ cmpb %al, 3f je 1f movb %al, 3f jmp 1f /* Synchronize pipeline */ 1: /* Save state */ pushfl pushw %fs pushw %gs pushal /* Copy input state to stack frame */ subw $44, %sp movw %dx, %si movw %sp, %di movw $11, %cx rep; movsl /* Pop full state from the stack */ popal popw %gs popw %fs popw %es popw %ds popfl /* Actual INT */ .byte 0xcd /* INT opcode */ 3: .byte 0 /* Push full state to the stack */ pushfl pushw %ds pushw %es pushw %fs pushw %gs pushal /* Re-establish C environment invariants */ cld movzwl %sp, %esp movw %cs, %ax movw %ax, %ds movw %ax, %es /* Copy output state from stack frame */ movw 68(%esp), %di /* Original %cx == 3rd argument */ andw %di, %di jz 4f movw %sp, %si movw $11, %cx rep; movsl 4: addw $44, %sp /* Restore state and return */ popal popw %gs popw %fs popfl retl .size intcall, .-intcall
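intcall() above shuttles an 11-dword (44-byte) register frame between the caller and the stack around the self-modified INT instruction. A hedged sketch of what such a frame could look like; the authoritative layout is the boot code's struct biosregs, and the struct below is only an illustration of why the assembly uses "subw $44, %sp" and "movw $11, %cx".

#include <stdint.h>

struct bios_regframe_sketch {
	uint32_t edi, esi, ebp, _esp, ebx, edx, ecx, eax;	/* pushal order */
	uint32_t gs_fs;		/* 16-bit %gs and %fs packed into one dword */
	uint32_t es_ds;		/* 16-bit %es and %ds packed into one dword */
	uint32_t eflags;
};

/* 11 dwords * 4 bytes == 44 bytes, matching the frame size in the asm. */
_Static_assert(sizeof(struct bios_regframe_sketch) == 44, "frame is 44 bytes");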
aixcc-public/challenge-001-exemplar-source
18,078
arch/x86/boot/header.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * header.S * * Copyright (C) 1991, 1992 Linus Torvalds * * Based on bootsect.S and setup.S * modified by more people than can be counted * * Rewritten as a common file by H. Peter Anvin (Apr 2007) * * BIG FAT NOTE: We're in real mode using 64k segments. Therefore segment * addresses must be multiplied by 16 to obtain their respective linear * addresses. To avoid confusion, linear addresses are written using leading * hex while segment addresses are written as segment:offset. * */ #include <linux/pe.h> #include <asm/segment.h> #include <asm/boot.h> #include <asm/page_types.h> #include <asm/setup.h> #include <asm/bootparam.h> #include "boot.h" #include "voffset.h" #include "zoffset.h" BOOTSEG = 0x07C0 /* original address of boot-sector */ SYSSEG = 0x1000 /* historical load address >> 4 */ #ifndef SVGA_MODE #define SVGA_MODE ASK_VGA #endif #ifndef ROOT_RDONLY #define ROOT_RDONLY 1 #endif .code16 .section ".bstext", "ax" .global bootsect_start bootsect_start: #ifdef CONFIG_EFI_STUB # "MZ", MS-DOS header .word MZ_MAGIC #endif # Normalize the start address ljmp $BOOTSEG, $start2 start2: movw %cs, %ax movw %ax, %ds movw %ax, %es movw %ax, %ss xorw %sp, %sp sti cld movw $bugger_off_msg, %si msg_loop: lodsb andb %al, %al jz bs_die movb $0xe, %ah movw $7, %bx int $0x10 jmp msg_loop bs_die: # Allow the user to press a key, then reboot xorw %ax, %ax int $0x16 int $0x19 # int 0x19 should never return. In case it does anyway, # invoke the BIOS reset code... ljmp $0xf000,$0xfff0 #ifdef CONFIG_EFI_STUB .org 0x3c # # Offset to the PE header. # .long pe_header #endif /* CONFIG_EFI_STUB */ .section ".bsdata", "a" bugger_off_msg: .ascii "Use a boot loader.\r\n" .ascii "\n" .ascii "Remove disk and press any key to reboot...\r\n" .byte 0 #ifdef CONFIG_EFI_STUB pe_header: .long PE_MAGIC coff_header: #ifdef CONFIG_X86_32 .set image_file_add_flags, IMAGE_FILE_32BIT_MACHINE .set pe_opt_magic, PE_OPT_MAGIC_PE32 .word IMAGE_FILE_MACHINE_I386 #else .set image_file_add_flags, 0 .set pe_opt_magic, PE_OPT_MAGIC_PE32PLUS .word IMAGE_FILE_MACHINE_AMD64 #endif .word section_count # nr_sections .long 0 # TimeDateStamp .long 0 # PointerToSymbolTable .long 1 # NumberOfSymbols .word section_table - optional_header # SizeOfOptionalHeader .word IMAGE_FILE_EXECUTABLE_IMAGE | \ image_file_add_flags | \ IMAGE_FILE_DEBUG_STRIPPED | \ IMAGE_FILE_LINE_NUMS_STRIPPED # Characteristics optional_header: .word pe_opt_magic .byte 0x02 # MajorLinkerVersion .byte 0x14 # MinorLinkerVersion # Filled in by build.c .long 0 # SizeOfCode .long 0 # SizeOfInitializedData .long 0 # SizeOfUninitializedData # Filled in by build.c .long 0x0000 # AddressOfEntryPoint .long 0x0200 # BaseOfCode #ifdef CONFIG_X86_32 .long 0 # data #endif extra_header_fields: # PE specification requires ImageBase to be 64k aligned .set image_base, (LOAD_PHYSICAL_ADDR + 0xffff) & ~0xffff #ifdef CONFIG_X86_32 .long image_base # ImageBase #else .quad image_base # ImageBase #endif .long 0x20 # SectionAlignment .long 0x20 # FileAlignment .word 0 # MajorOperatingSystemVersion .word 0 # MinorOperatingSystemVersion .word LINUX_EFISTUB_MAJOR_VERSION # MajorImageVersion .word LINUX_EFISTUB_MINOR_VERSION # MinorImageVersion .word 0 # MajorSubsystemVersion .word 0 # MinorSubsystemVersion .long 0 # Win32VersionValue # # The size of the bzImage is written in tools/build.c # .long 0 # SizeOfImage .long 0x200 # SizeOfHeaders .long 0 # CheckSum .word IMAGE_SUBSYSTEM_EFI_APPLICATION # Subsystem (EFI application) #ifdef CONFIG_EFI_DXE_MEM_ATTRIBUTES .word 
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT # DllCharacteristics #else .word 0 # DllCharacteristics #endif #ifdef CONFIG_X86_32 .long 0 # SizeOfStackReserve .long 0 # SizeOfStackCommit .long 0 # SizeOfHeapReserve .long 0 # SizeOfHeapCommit #else .quad 0 # SizeOfStackReserve .quad 0 # SizeOfStackCommit .quad 0 # SizeOfHeapReserve .quad 0 # SizeOfHeapCommit #endif .long 0 # LoaderFlags .long (section_table - .) / 8 # NumberOfRvaAndSizes .quad 0 # ExportTable .quad 0 # ImportTable .quad 0 # ResourceTable .quad 0 # ExceptionTable .quad 0 # CertificationTable .quad 0 # BaseRelocationTable # Section table section_table: # # The offset & size fields are filled in by build.c. # .ascii ".setup" .byte 0 .byte 0 .long 0 .long 0x0 # startup_{32,64} .long 0 # Size of initialized data # on disk .long 0x0 # startup_{32,64} .long 0 # PointerToRelocations .long 0 # PointerToLineNumbers .word 0 # NumberOfRelocations .word 0 # NumberOfLineNumbers .long IMAGE_SCN_CNT_CODE | \ IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_EXECUTE | \ IMAGE_SCN_ALIGN_16BYTES # Characteristics # # The EFI application loader requires a relocation section # because EFI applications must be relocatable. The .reloc # offset & size fields are filled in by build.c. # .ascii ".reloc" .byte 0 .byte 0 .long 0 .long 0 .long 0 # SizeOfRawData .long 0 # PointerToRawData .long 0 # PointerToRelocations .long 0 # PointerToLineNumbers .word 0 # NumberOfRelocations .word 0 # NumberOfLineNumbers .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_DISCARDABLE | \ IMAGE_SCN_ALIGN_1BYTES # Characteristics #ifdef CONFIG_EFI_MIXED # # The offset & size fields are filled in by build.c. # .asciz ".compat" .long 0 .long 0x0 .long 0 # Size of initialized data # on disk .long 0x0 .long 0 # PointerToRelocations .long 0 # PointerToLineNumbers .word 0 # NumberOfRelocations .word 0 # NumberOfLineNumbers .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_DISCARDABLE | \ IMAGE_SCN_ALIGN_1BYTES # Characteristics #endif # # The offset & size fields are filled in by build.c. # .ascii ".text" .byte 0 .byte 0 .byte 0 .long 0 .long 0x0 # startup_{32,64} .long 0 # Size of initialized data # on disk .long 0x0 # startup_{32,64} .long 0 # PointerToRelocations .long 0 # PointerToLineNumbers .word 0 # NumberOfRelocations .word 0 # NumberOfLineNumbers .long IMAGE_SCN_CNT_CODE | \ IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_EXECUTE | \ IMAGE_SCN_ALIGN_16BYTES # Characteristics .set section_count, (. - section_table) / 40 #endif /* CONFIG_EFI_STUB */ # Kernel attributes; used by setup. This is part 1 of the # header, from the old boot sector. .section ".header", "a" .globl sentinel sentinel: .byte 0xff, 0xff /* Used to detect broken loaders */ .globl hdr hdr: setup_sects: .byte 0 /* Filled in by build.c */ root_flags: .word ROOT_RDONLY syssize: .long 0 /* Filled in by build.c */ ram_size: .word 0 /* Obsolete */ vid_mode: .word SVGA_MODE root_dev: .word 0 /* Filled in by build.c */ boot_flag: .word 0xAA55 # offset 512, entry point .globl _start _start: # Explicitly enter this as bytes, or the assembler # tries to generate a 3-byte jump here, which causes # everything else to push off to the wrong offset. 
.byte 0xeb # short (2-byte) jump .byte start_of_setup-1f 1: # Part 2 of the header, from the old setup.S .ascii "HdrS" # header signature .word 0x020f # header version number (>= 0x0105) # or else old loadlin-1.5 will fail) .globl realmode_swtch realmode_swtch: .word 0, 0 # default_switch, SETUPSEG start_sys_seg: .word SYSSEG # obsolete and meaningless, but just # in case something decided to "use" it .word kernel_version-512 # pointing to kernel version string # above section of header is compatible # with loadlin-1.5 (header v1.5). Don't # change it. type_of_loader: .byte 0 # 0 means ancient bootloader, newer # bootloaders know to change this. # See Documentation/x86/boot.rst for # assigned ids # flags, unused bits must be zero (RFU) bit within loadflags loadflags: .byte LOADED_HIGH # The kernel is to be loaded high setup_move_size: .word 0x8000 # size to move, when setup is not # loaded at 0x90000. We will move setup # to 0x90000 then just before jumping # into the kernel. However, only the # loader knows how much data behind # us also needs to be loaded. code32_start: # here loaders can put a different # start address for 32-bit code. .long 0x100000 # 0x100000 = default for big kernel ramdisk_image: .long 0 # address of loaded ramdisk image # Here the loader puts the 32-bit # address where it loaded the image. # This only will be read by the kernel. ramdisk_size: .long 0 # its size in bytes bootsect_kludge: .long 0 # obsolete heap_end_ptr: .word _end+STACK_SIZE-512 # (Header version 0x0201 or later) # space from here (exclusive) down to # end of setup code can be used by setup # for local heap purposes. ext_loader_ver: .byte 0 # Extended boot loader version ext_loader_type: .byte 0 # Extended boot loader type cmd_line_ptr: .long 0 # (Header version 0x0202 or later) # If nonzero, a 32-bit pointer # to the kernel command line. # The command line should be # located between the start of # setup and the end of low # memory (0xa0000), or it may # get overwritten before it # gets read. If this field is # used, there is no longer # anything magical about the # 0x90000 segment; the setup # can be located anywhere in # low memory 0x10000 or higher. initrd_addr_max: .long 0x7fffffff # (Header version 0x0203 or later) # The highest safe address for # the contents of an initrd # The current kernel allows up to 4 GB, # but leave it at 2 GB to avoid # possible bootloader bugs. 
kernel_alignment: .long CONFIG_PHYSICAL_ALIGN #physical addr alignment #required for protected mode #kernel #ifdef CONFIG_RELOCATABLE relocatable_kernel: .byte 1 #else relocatable_kernel: .byte 0 #endif min_alignment: .byte MIN_KERNEL_ALIGN_LG2 # minimum alignment xloadflags: #ifdef CONFIG_X86_64 # define XLF0 XLF_KERNEL_64 /* 64-bit kernel */ #else # define XLF0 0 #endif #if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64) /* kernel/boot_param/ramdisk could be loaded above 4g */ # define XLF1 XLF_CAN_BE_LOADED_ABOVE_4G #else # define XLF1 0 #endif #ifdef CONFIG_EFI_STUB # ifdef CONFIG_EFI_MIXED # define XLF23 (XLF_EFI_HANDOVER_32|XLF_EFI_HANDOVER_64) # else # ifdef CONFIG_X86_64 # define XLF23 XLF_EFI_HANDOVER_64 /* 64-bit EFI handover ok */ # else # define XLF23 XLF_EFI_HANDOVER_32 /* 32-bit EFI handover ok */ # endif # endif #else # define XLF23 0 #endif #if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC_CORE) # define XLF4 XLF_EFI_KEXEC #else # define XLF4 0 #endif #ifdef CONFIG_X86_64 #ifdef CONFIG_X86_5LEVEL #define XLF56 (XLF_5LEVEL|XLF_5LEVEL_ENABLED) #else #define XLF56 XLF_5LEVEL #endif #else #define XLF56 0 #endif .word XLF0 | XLF1 | XLF23 | XLF4 | XLF56 cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line, #added with boot protocol #version 2.06 hardware_subarch: .long 0 # subarchitecture, added with 2.07 # default to 0 for normal x86 PC hardware_subarch_data: .quad 0 payload_offset: .long ZO_input_data payload_length: .long ZO_z_input_len setup_data: .quad 0 # 64-bit physical pointer to # single linked list of # struct setup_data pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr # # Getting to provably safe in-place decompression is hard. Worst case # behaviours need to be analyzed. Here let's take the decompression of # a gzip-compressed kernel as example, to illustrate it: # # The file layout of gzip compressed kernel is: # # magic[2] # method[1] # flags[1] # timestamp[4] # extraflags[1] # os[1] # compressed data blocks[N] # crc[4] orig_len[4] # # ... resulting in +18 bytes overhead of uncompressed data. # # (For more information, please refer to RFC 1951 and RFC 1952.) # # Files divided into blocks # 1 bit (last block flag) # 2 bits (block type) # # 1 block occurs every 32K -1 bytes or when there 50% compression # has been achieved. The smallest block type encoding is always used. # # stored: # 32 bits length in bytes. # # fixed: # magic fixed tree. # symbols. # # dynamic: # dynamic tree encoding. # symbols. # # # The buffer for decompression in place is the length of the uncompressed # data, plus a small amount extra to keep the algorithm safe. The # compressed data is placed at the end of the buffer. The output pointer # is placed at the start of the buffer and the input pointer is placed # where the compressed data starts. Problems will occur when the output # pointer overruns the input pointer. # # The output pointer can only overrun the input pointer if the input # pointer is moving faster than the output pointer. A condition only # triggered by data whose compressed form is larger than the uncompressed # form. # # The worst case at the block level is a growth of the compressed data # of 5 bytes per 32767 bytes. # # The worst case internal to a compressed block is very hard to figure. # The worst case can at least be bounded by having one bit that represents # 32764 bytes and then all of the rest of the bytes representing the very # very last byte. 
# # All of which is enough to compute an amount of extra data that is required # to be safe. To avoid problems at the block level allocating 5 extra bytes # per 32767 bytes of data is sufficient. To avoid problems internal to a # block adding an extra 32767 bytes (the worst case uncompressed block size) # is sufficient, to ensure that in the worst case the decompressed data for # block will stop the byte before the compressed data for a block begins. # To avoid problems with the compressed data's meta information an extra 18 # bytes are needed. Leading to the formula: # # extra_bytes = (uncompressed_size >> 12) + 32768 + 18 # # Adding 8 bytes per 32K is a bit excessive but much easier to calculate. # Adding 32768 instead of 32767 just makes for round numbers. # # Above analysis is for decompressing gzip compressed kernel only. Up to # now 6 different decompressor are supported all together. And among them # xz stores data in chunks and has maximum chunk of 64K. Hence safety # margin should be updated to cover all decompressors so that we don't # need to deal with each of them separately. Please check # the description in lib/decompressor_xxx.c for specific information. # # extra_bytes = (uncompressed_size >> 12) + 65536 + 128 # # LZ4 is even worse: data that cannot be further compressed grows by 0.4%, # or one byte per 256 bytes. OTOH, we can safely get rid of the +128 as # the size-dependent part now grows so fast. # # extra_bytes = (uncompressed_size >> 8) + 65536 # # ZSTD compressed data grows by at most 3 bytes per 128K, and only has a 22 # byte fixed overhead but has a maximum block size of 128K, so it needs a # larger margin. # # extra_bytes = (uncompressed_size >> 8) + 131072 #define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 131072) #if ZO_z_output_len > ZO_z_input_len # define ZO_z_extract_offset (ZO_z_output_len + ZO_z_extra_bytes - \ ZO_z_input_len) #else # define ZO_z_extract_offset ZO_z_extra_bytes #endif /* * The extract_offset has to be bigger than ZO head section. Otherwise when * the head code is running to move ZO to the end of the buffer, it will * overwrite the head code itself. */ #if (ZO__ehead - ZO_startup_32) > ZO_z_extract_offset # define ZO_z_min_extract_offset ((ZO__ehead - ZO_startup_32 + 4095) & ~4095) #else # define ZO_z_min_extract_offset ((ZO_z_extract_offset + 4095) & ~4095) #endif #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_min_extract_offset) #define VO_INIT_SIZE (VO__end - VO__text) #if ZO_INIT_SIZE > VO_INIT_SIZE # define INIT_SIZE ZO_INIT_SIZE #else # define INIT_SIZE VO_INIT_SIZE #endif init_size: .long INIT_SIZE # kernel initialization size handover_offset: .long 0 # Filled in by build.c kernel_info_offset: .long 0 # Filled in by build.c # End of setup header ##################################################### .section ".entrytext", "ax" start_of_setup: # Force %es = %ds movw %ds, %ax movw %ax, %es cld # Apparently some ancient versions of LILO invoked the kernel with %ss != %ds, # which happened to work by accident for the old code. Recalculate the stack # pointer if %ss is invalid. Otherwise leave it alone, LOADLIN sets up the # stack behind its own code, so we can't blindly put it directly past the heap. movw %ss, %dx cmpw %ax, %dx # %ds == %ss? 
movw %sp, %dx je 2f # -> assume %sp is reasonably set # Invalid %ss, make up a new stack movw $_end, %dx testb $CAN_USE_HEAP, loadflags jz 1f movw heap_end_ptr, %dx 1: addw $STACK_SIZE, %dx jnc 2f xorw %dx, %dx # Prevent wraparound 2: # Now %dx should point to the end of our stack space andw $~3, %dx # dword align (might as well...) jnz 3f movw $0xfffc, %dx # Make sure we're not zero 3: movw %ax, %ss movzwl %dx, %esp # Clear upper half of %esp sti # Now we should have a working stack # We will have entered with %cs = %ds+0x20, normalize %cs so # it is on par with the other segments. pushw %ds pushw $6f lretw 6: # Check signature at end of setup cmpl $0x5a5aaa55, setup_sig jne setup_bad # Zero the bss movw $__bss_start, %di movw $_end+3, %cx xorl %eax, %eax subw %di, %cx shrw $2, %cx rep; stosl # Jump to C code (should not return) calll main # Setup corrupt somehow... setup_bad: movl $setup_corrupt, %eax calll puts # Fall through... .globl die .type die, @function die: hlt jmp die .size die, .-die .section ".initdata", "a" setup_corrupt: .byte 7 .string "No setup signature found...\n"
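The long comment above header.S derives the in-place decompression safety margin and settles on extra_bytes = (uncompressed_size >> 8) + 131072 to cover the worst decompressor (ZSTD's 128K blocks). A small worked example of that arithmetic; the 30 MiB figure is an assumed example, not a value from any real build.

#include <stdio.h>

int main(void)
{
	unsigned long uncompressed = 30UL << 20;	/* assume ~30 MiB of kernel */
	unsigned long extra = (uncompressed >> 8) + 131072;

	/* the compressed image is placed this far past the start of the
	 * output buffer so the write pointer never overtakes the reads */
	printf("extra_bytes = %lu (~%lu KiB)\n", extra, extra >> 10);
	return 0;
}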
aixcc-public/challenge-001-exemplar-source
1,045
arch/x86/boot/copy.S
/* SPDX-License-Identifier: GPL-2.0-only */
/* -----------------------------------------------------------------------
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

#include <linux/linkage.h>

/*
 * Memory copy routines
 */

	.code16
	.text

SYM_FUNC_START_NOALIGN(memcpy)
	pushw	%si
	pushw	%di
	movw	%ax, %di
	movw	%dx, %si
	pushw	%cx
	shrw	$2, %cx
	rep; movsl
	popw	%cx
	andw	$3, %cx
	rep; movsb
	popw	%di
	popw	%si
	retl
SYM_FUNC_END(memcpy)

SYM_FUNC_START_NOALIGN(memset)
	pushw	%di
	movw	%ax, %di
	movzbl	%dl, %eax
	imull	$0x01010101,%eax
	pushw	%cx
	shrw	$2, %cx
	rep; stosl
	popw	%cx
	andw	$3, %cx
	rep; stosb
	popw	%di
	retl
SYM_FUNC_END(memset)

SYM_FUNC_START_NOALIGN(copy_from_fs)
	pushw	%ds
	pushw	%fs
	popw	%ds
	calll	memcpy
	popw	%ds
	retl
SYM_FUNC_END(copy_from_fs)

SYM_FUNC_START_NOALIGN(copy_to_fs)
	pushw	%es
	pushw	%fs
	popw	%es
	calll	memcpy
	popw	%es
	retl
SYM_FUNC_END(copy_to_fs)
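The 16-bit memset above uses two tricks: the fill byte is replicated into all four bytes of a dword by multiplying with 0x01010101, and the count is split into whole dwords (count >> 2, stored with rep stosl) plus a remainder (count & 3, stored with rep stosb). A hedged C sketch of the same idea; memset_sketch is a made-up name.

#include <stdint.h>
#include <stddef.h>

static void memset_sketch(void *dst, uint8_t c, size_t count)
{
	uint32_t pattern = (uint32_t)c * 0x01010101u;	/* e.g. 0xab -> 0xabababab */
	uint32_t *d32 = dst;
	size_t i;

	for (i = 0; i < count >> 2; i++)	/* rep; stosl equivalent */
		d32[i] = pattern;

	uint8_t *d8 = (uint8_t *)dst + (count & ~(size_t)3);
	for (i = 0; i < (count & 3); i++)	/* rep; stosb equivalent */
		d8[i] = c;
}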
aixcc-public/challenge-001-exemplar-source
1,705
arch/x86/boot/pmjump.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* ----------------------------------------------------------------------- * * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved * * ----------------------------------------------------------------------- */ /* * The actual transition into protected mode */ #include <asm/boot.h> #include <asm/processor-flags.h> #include <asm/segment.h> #include <linux/linkage.h> .text .code16 /* * void protected_mode_jump(u32 entrypoint, u32 bootparams); */ SYM_FUNC_START_NOALIGN(protected_mode_jump) movl %edx, %esi # Pointer to boot_params table xorl %ebx, %ebx movw %cs, %bx shll $4, %ebx addl %ebx, 2f jmp 1f # Short jump to serialize on 386/486 1: movw $__BOOT_DS, %cx movw $__BOOT_TSS, %di movl %cr0, %edx orb $X86_CR0_PE, %dl # Protected mode movl %edx, %cr0 # Transition to 32-bit mode .byte 0x66, 0xea # ljmpl opcode 2: .long .Lin_pm32 # offset .word __BOOT_CS # segment SYM_FUNC_END(protected_mode_jump) .code32 .section ".text32","ax" SYM_FUNC_START_LOCAL_NOALIGN(.Lin_pm32) # Set up data segments for flat 32-bit mode movl %ecx, %ds movl %ecx, %es movl %ecx, %fs movl %ecx, %gs movl %ecx, %ss # The 32-bit code sets up its own stack, but this way we do have # a valid stack if some debugging hack wants to use it. addl %ebx, %esp # Set up TR to make Intel VT happy ltr %di # Clear registers to allow for future extensions to the # 32-bit boot protocol xorl %ecx, %ecx xorl %edx, %edx xorl %ebx, %ebx xorl %ebp, %ebp xorl %edi, %edi # Set up LDTR to make Intel VT happy lldt %cx jmpl *%eax # Jump to the 32-bit entrypoint SYM_FUNC_END(.Lin_pm32)
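protected_mode_jump above patches its far-jump target at run time: the 32-bit linear address of .Lin_pm32 is the real-mode segment base (%cs shifted left by four) plus the link-time offset stored at label 2. A minimal sketch of that segment:offset to linear conversion; the example values are made up.

#include <stdint.h>

static uint32_t realmode_linear(uint16_t seg, uint32_t off)
{
	return ((uint32_t)seg << 4) + off;	/* e.g. 0x07C0:0x0000 -> 0x7C00 */
}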
aixcc-public/challenge-001-exemplar-source
43,654
arch/x86/entry/entry_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/x86_64/entry.S * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> * * entry.S contains the system-call and fault low-level handling routines. * * Some of this is documented in Documentation/x86/entry_64.rst * * A note on terminology: * - iret frame: Architecture defined interrupt frame from SS to RIP * at the top of the kernel process stack. * * Some macro usage: * - SYM_FUNC_START/END:Define functions in the symbol table. * - idtentry: Define exception entry points. */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/msr.h> #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/hw_irq.h> #include <asm/page_types.h> #include <asm/irqflags.h> #include <asm/paravirt.h> #include <asm/percpu.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/pgtable_types.h> #include <asm/export.h> #include <asm/frame.h> #include <asm/trapnr.h> #include <asm/nospec-branch.h> #include <asm/fsgsbase.h> #include <linux/err.h> #include "calling.h" .code64 .section .entry.text, "ax" /* * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers. * * This is the only entry point used for 64-bit system calls. The * hardware interface is reasonably well designed and the register to * argument mapping Linux uses fits well with the registers that are * available when SYSCALL is used. * * SYSCALL instructions can be found inlined in libc implementations as * well as some other programs and libraries. There are also a handful * of SYSCALL instructions in the vDSO used, for example, as a * clock_gettimeofday fallback. * * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, * then loads new ss, cs, and rip from previously programmed MSRs. * rflags gets masked by a value from another MSR (so CLD and CLAC * are not needed). SYSCALL does not save anything on the stack * and does not change rsp. * * Registers on entry: * rax system call number * rcx return address * r11 saved rflags (note: r11 is callee-clobbered register in C ABI) * rdi arg0 * rsi arg1 * rdx arg2 * r10 arg3 (needs to be moved to rcx to conform to C ABI) * r8 arg4 * r9 arg5 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI) * * Only called from user space. * * When user can change pt_regs->foo always force IRET. That is because * it deals with uncanonical addresses better. SYSRET has trouble * with them due to bugs in both AMD and Intel CPUs. */ SYM_CODE_START(entry_SYSCALL_64) UNWIND_HINT_ENTRY ENDBR swapgs /* tss.sp2 is scratch space. */ movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2) SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL) ANNOTATE_NOENDBR /* Construct struct pt_regs on stack */ pushq $__USER_DS /* pt_regs->ss */ pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */ pushq %r11 /* pt_regs->flags */ pushq $__USER_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) pushq %rax /* pt_regs->orig_ax */ PUSH_AND_CLEAR_REGS rax=$-ENOSYS /* IRQs are off. 
*/ movq %rsp, %rdi /* Sign extend the lower 32bit as syscall numbers are treated as int */ movslq %eax, %rsi /* clobbers %rax, make sure it is after saving the syscall nr */ IBRS_ENTER UNTRAIN_RET call do_syscall_64 /* returns with IRQs disabled */ /* * Try to use SYSRET instead of IRET if we're returning to * a completely clean 64-bit userspace context. If we're not, * go to the slow exit path. * In the Xen PV case we must use iret anyway. */ ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \ X86_FEATURE_XENPV movq RCX(%rsp), %rcx movq RIP(%rsp), %r11 cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */ jne swapgs_restore_regs_and_return_to_usermode /* * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP * in kernel space. This essentially lets the user take over * the kernel, since userspace controls RSP. * * If width of "canonical tail" ever becomes variable, this will need * to be updated to remain correct on both old and new CPUs. * * Change top bits to match most significant bit (47th or 56th bit * depending on paging mode) in the address. */ #ifdef CONFIG_X86_5LEVEL ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \ "shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57 #else shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx #endif /* If this changed %rcx, it was not canonical */ cmpq %rcx, %r11 jne swapgs_restore_regs_and_return_to_usermode cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */ jne swapgs_restore_regs_and_return_to_usermode movq R11(%rsp), %r11 cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */ jne swapgs_restore_regs_and_return_to_usermode /* * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot * restore RF properly. If the slowpath sets it for whatever reason, we * need to restore it correctly. * * SYSRET can restore TF, but unlike IRET, restoring TF results in a * trap from userspace immediately after SYSRET. This would cause an * infinite loop whenever #DB happens with register state that satisfies * the opportunistic SYSRET conditions. For example, single-stepping * this user code: * * movq $stuck_here, %rcx * pushfq * popq %r11 * stuck_here: * * would never get past 'stuck_here'. */ testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 jnz swapgs_restore_regs_and_return_to_usermode /* nothing to check for RSP */ cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */ jne swapgs_restore_regs_and_return_to_usermode /* * We win! This label is here just for ease of understanding * perf profiles. Nothing jumps here. */ syscall_return_via_sysret: IBRS_EXIT POP_REGS pop_rdi=0 /* * Now all regs are restored except RSP and RDI. * Save old stack pointer and switch to trampoline stack. */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp UNWIND_HINT_EMPTY pushq RSP-RDI(%rdi) /* RSP */ pushq (%rdi) /* RDI */ /* * We are on the trampoline stack. All regs except RDI are live. * We can do future final exit work right here. 
*/ STACKLEAK_ERASE_NOCLOBBER SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi popq %rdi popq %rsp SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL) ANNOTATE_NOENDBR swapgs sysretq SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL) ANNOTATE_NOENDBR int3 SYM_CODE_END(entry_SYSCALL_64) /* * %rdi: prev task * %rsi: next task */ .pushsection .text, "ax" SYM_FUNC_START(__switch_to_asm) /* * Save callee-saved registers * This must match the order in inactive_task_frame */ pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 /* switch stack */ movq %rsp, TASK_threadsp(%rdi) movq TASK_threadsp(%rsi), %rsp #ifdef CONFIG_STACKPROTECTOR movq TASK_stack_canary(%rsi), %rbx movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset #endif /* * When switching from a shallower to a deeper call stack * the RSB may either underflow or use entries populated * with userspace addresses. On CPUs where those concerns * exist, overwrite the RSB with entries which capture * speculative execution to prevent attack. */ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW /* restore callee-saved registers */ popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx popq %rbp jmp __switch_to SYM_FUNC_END(__switch_to_asm) .popsection /* * A newly forked process directly context switches into this address. * * rax: prev task we switched from * rbx: kernel thread func (NULL for user thread) * r12: kernel thread arg */ .pushsection .text, "ax" SYM_CODE_START(ret_from_fork) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR // copy_thread movq %rax, %rdi call schedule_tail /* rdi: 'prev' task parameter */ testq %rbx, %rbx /* from kernel_thread? */ jnz 1f /* kernel threads are uncommon */ 2: UNWIND_HINT_REGS movq %rsp, %rdi call syscall_exit_to_user_mode /* returns with IRQs disabled */ jmp swapgs_restore_regs_and_return_to_usermode 1: /* kernel thread */ UNWIND_HINT_EMPTY movq %r12, %rdi CALL_NOSPEC rbx /* * A kernel thread is allowed to return here after successfully * calling kernel_execve(). Exit to userspace to complete the execve() * syscall. */ movq $0, RAX(%rsp) jmp 2b SYM_CODE_END(ret_from_fork) .popsection .macro DEBUG_ENTRY_ASSERT_IRQS_OFF #ifdef CONFIG_DEBUG_ENTRY pushq %rax SAVE_FLAGS testl $X86_EFLAGS_IF, %eax jz .Lokay_\@ ud2 .Lokay_\@: popq %rax #endif .endm SYM_CODE_START_LOCAL(xen_error_entry) UNWIND_HINT_FUNC PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 UNTRAIN_RET RET SYM_CODE_END(xen_error_entry) /** * idtentry_body - Macro to emit code calling the C function * @cfunc: C function to be called * @has_error_code: Hardware pushed error code on stack */ .macro idtentry_body cfunc has_error_code:req /* * Call error_entry() and switch to the task stack if from userspace. * * When in XENPV, it is already in the task stack, and it can't fault * for native_iret() nor native_load_gs_index() since XENPV uses its * own pvops for IRET and load_gs_index(). And it doesn't need to * switch the CR3. So it can skip invoking error_entry(). */ ALTERNATIVE "call error_entry; movq %rax, %rsp", \ "call xen_error_entry", X86_FEATURE_XENPV ENCODE_FRAME_POINTER UNWIND_HINT_REGS movq %rsp, %rdi /* pt_regs pointer into 1st argument*/ .if \has_error_code == 1 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ .endif call \cfunc /* For some configurations \cfunc ends up being a noreturn. 
*/ REACHABLE jmp error_return .endm /** * idtentry - Macro to generate entry stubs for simple IDT entries * @vector: Vector number * @asmsym: ASM symbol for the entry point * @cfunc: C function to be called * @has_error_code: Hardware pushed error code on stack * * The macro emits code to set up the kernel context for straight forward * and simple IDT entries. No IST stack, no paranoid entry checks. */ .macro idtentry vector asmsym cfunc has_error_code:req SYM_CODE_START(\asmsym) UNWIND_HINT_IRET_REGS offset=\has_error_code*8 ENDBR ASM_CLAC cld .if \has_error_code == 0 pushq $-1 /* ORIG_RAX: no syscall to restart */ .endif .if \vector == X86_TRAP_BP /* * If coming from kernel space, create a 6-word gap to allow the * int3 handler to emulate a call instruction. */ testb $3, CS-ORIG_RAX(%rsp) jnz .Lfrom_usermode_no_gap_\@ .rept 6 pushq 5*8(%rsp) .endr UNWIND_HINT_IRET_REGS offset=8 .Lfrom_usermode_no_gap_\@: .endif idtentry_body \cfunc \has_error_code _ASM_NOKPROBE(\asmsym) SYM_CODE_END(\asmsym) .endm /* * Interrupt entry/exit. * + The interrupt stubs push (vector) onto the stack, which is the error_code * position of idtentry exceptions, and jump to one of the two idtentry points * (common/spurious). * * common_interrupt is a hotpath, align it to a cache line */ .macro idtentry_irq vector cfunc .p2align CONFIG_X86_L1_CACHE_SHIFT idtentry \vector asm_\cfunc \cfunc has_error_code=1 .endm /* * System vectors which invoke their handlers directly and are not * going through the regular common device interrupt handling code. */ .macro idtentry_sysvec vector cfunc idtentry \vector asm_\cfunc \cfunc has_error_code=0 .endm /** * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB * @vector: Vector number * @asmsym: ASM symbol for the entry point * @cfunc: C function to be called * * The macro emits code to set up the kernel context for #MC and #DB * * If the entry comes from user space it uses the normal entry path * including the return to user space work and preemption checks on * exit. * * If hits in kernel mode then it needs to go through the paranoid * entry as the exception can hit any random state. No preemption * check on exit to keep the paranoid path simple. */ .macro idtentry_mce_db vector asmsym cfunc SYM_CODE_START(\asmsym) UNWIND_HINT_IRET_REGS ENDBR ASM_CLAC cld pushq $-1 /* ORIG_RAX: no syscall to restart */ /* * If the entry is from userspace, switch stacks and treat it as * a normal entry. */ testb $3, CS-ORIG_RAX(%rsp) jnz .Lfrom_usermode_switch_stack_\@ /* paranoid_entry returns GS information for paranoid_exit in EBX. */ call paranoid_entry UNWIND_HINT_REGS movq %rsp, %rdi /* pt_regs pointer */ call \cfunc jmp paranoid_exit /* Switch to the regular task stack and use the noist entry point */ .Lfrom_usermode_switch_stack_\@: idtentry_body noist_\cfunc, has_error_code=0 _ASM_NOKPROBE(\asmsym) SYM_CODE_END(\asmsym) .endm #ifdef CONFIG_AMD_MEM_ENCRYPT /** * idtentry_vc - Macro to generate entry stub for #VC * @vector: Vector number * @asmsym: ASM symbol for the entry point * @cfunc: C function to be called * * The macro emits code to set up the kernel context for #VC. The #VC handler * runs on an IST stack and needs to be able to cause nested #VC exceptions. * * To make this work the #VC entry code tries its best to pretend it doesn't use * an IST stack by switching to the task stack if coming from user-space (which * includes early SYSCALL entry path) or back to the stack in the IRET frame if * entered from kernel-mode. 
* * If entered from kernel-mode the return stack is validated first, and if it is * not safe to use (e.g. because it points to the entry stack) the #VC handler * will switch to a fall-back stack (VC2) and call a special handler function. * * The macro is only used for one vector, but it is planned to be extended in * the future for the #HV exception. */ .macro idtentry_vc vector asmsym cfunc SYM_CODE_START(\asmsym) UNWIND_HINT_IRET_REGS ENDBR ASM_CLAC cld /* * If the entry is from userspace, switch stacks and treat it as * a normal entry. */ testb $3, CS-ORIG_RAX(%rsp) jnz .Lfrom_usermode_switch_stack_\@ /* * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX. * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS */ call paranoid_entry UNWIND_HINT_REGS /* * Switch off the IST stack to make it free for nested exceptions. The * vc_switch_off_ist() function will switch back to the interrupted * stack if it is safe to do so. If not it switches to the VC fall-back * stack. */ movq %rsp, %rdi /* pt_regs pointer */ call vc_switch_off_ist movq %rax, %rsp /* Switch to new stack */ ENCODE_FRAME_POINTER UNWIND_HINT_REGS /* Update pt_regs */ movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ movq %rsp, %rdi /* pt_regs pointer */ call kernel_\cfunc /* * No need to switch back to the IST stack. The current stack is either * identical to the stack in the IRET frame or the VC fall-back stack, * so it is definitely mapped even with PTI enabled. */ jmp paranoid_exit /* Switch to the regular task stack */ .Lfrom_usermode_switch_stack_\@: idtentry_body user_\cfunc, has_error_code=1 _ASM_NOKPROBE(\asmsym) SYM_CODE_END(\asmsym) .endm #endif /* * Double fault entry. Straight paranoid. No checks from which context * this comes because for the espfix induced #DF this would do the wrong * thing. */ .macro idtentry_df vector asmsym cfunc SYM_CODE_START(\asmsym) UNWIND_HINT_IRET_REGS offset=8 ENDBR ASM_CLAC cld /* paranoid_entry returns GS information for paranoid_exit in EBX. */ call paranoid_entry UNWIND_HINT_REGS movq %rsp, %rdi /* pt_regs pointer into first argument */ movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ call \cfunc /* For some configurations \cfunc ends up being a noreturn. */ REACHABLE jmp paranoid_exit _ASM_NOKPROBE(\asmsym) SYM_CODE_END(\asmsym) .endm /* * Include the defines which emit the idt entries which are shared * shared between 32 and 64 bit and emit the __irqentry_text_* markers * so the stacktrace boundary checks work. */ .align 16 .globl __irqentry_text_start __irqentry_text_start: #include <asm/idtentry.h> .align 16 .globl __irqentry_text_end __irqentry_text_end: ANNOTATE_NOENDBR SYM_CODE_START_LOCAL(common_interrupt_return) SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) IBRS_EXIT #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates user mode. */ testb $3, CS(%rsp) jnz 1f ud2 1: #endif #ifdef CONFIG_XEN_PV ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV #endif POP_REGS pop_rdi=0 /* * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. * Save old stack pointer and switch to trampoline stack. */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp UNWIND_HINT_EMPTY /* Copy the IRET frame to the trampoline stack. 
*/ pushq 6*8(%rdi) /* SS */ pushq 5*8(%rdi) /* RSP */ pushq 4*8(%rdi) /* EFLAGS */ pushq 3*8(%rdi) /* CS */ pushq 2*8(%rdi) /* RIP */ /* Push user RDI on the trampoline stack. */ pushq (%rdi) /* * We are on the trampoline stack. All regs except RDI are live. * We can do future final exit work right here. */ STACKLEAK_ERASE_NOCLOBBER SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi /* Restore RDI. */ popq %rdi swapgs jmp .Lnative_iret SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL) #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates kernel mode. */ testb $3, CS(%rsp) jz 1f ud2 1: #endif POP_REGS addq $8, %rsp /* skip regs->orig_ax */ /* * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization * when returning from IPI handler. */ #ifdef CONFIG_XEN_PV SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL) ANNOTATE_NOENDBR .byte 0xe9 .long .Lnative_iret - (. + 4) #endif .Lnative_iret: UNWIND_HINT_IRET_REGS /* * Are we returning to a stack segment from the LDT? Note: in * 64-bit mode SS:RSP on the exception stack is always valid. */ #ifdef CONFIG_X86_ESPFIX64 testb $4, (SS-RIP)(%rsp) jnz native_irq_return_ldt #endif SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL) ANNOTATE_NOENDBR // exc_double_fault /* * This may fault. Non-paranoid faults on return to userspace are * handled by fixup_bad_iret. These include #SS, #GP, and #NP. * Double-faults due to espfix64 are handled in exc_double_fault. * Other faults here are fatal. */ iretq #ifdef CONFIG_X86_ESPFIX64 native_irq_return_ldt: /* * We are running with user GSBASE. All GPRs contain their user * values. We have a percpu ESPFIX stack that is eight slots * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom * of the ESPFIX stack. * * We clobber RAX and RDI in this code. We stash RDI on the * normal stack and RAX on the ESPFIX stack. * * The ESPFIX stack layout we set up looks like this: * * --- top of ESPFIX stack --- * SS * RSP * RFLAGS * CS * RIP <-- RSP points here when we're done * RAX <-- espfix_waddr points here * --- bottom of ESPFIX stack --- */ pushq %rdi /* Stash user RDI */ swapgs /* to kernel GS */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ movq PER_CPU_VAR(espfix_waddr), %rdi movq %rax, (0*8)(%rdi) /* user RAX */ movq (1*8)(%rsp), %rax /* user RIP */ movq %rax, (1*8)(%rdi) movq (2*8)(%rsp), %rax /* user CS */ movq %rax, (2*8)(%rdi) movq (3*8)(%rsp), %rax /* user RFLAGS */ movq %rax, (3*8)(%rdi) movq (5*8)(%rsp), %rax /* user SS */ movq %rax, (5*8)(%rdi) movq (4*8)(%rsp), %rax /* user RSP */ movq %rax, (4*8)(%rdi) /* Now RAX == RSP. */ andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ /* * espfix_stack[31:16] == 0. The page tables are set up such that * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of * espfix_waddr for any X. That is, there are 65536 RO aliases of * the same page. Set up RSP so that RSP[31:16] contains the * respective 16 bits of the /userspace/ RSP and RSP nonetheless * still points to an RO alias of the ESPFIX stack. */ orq PER_CPU_VAR(espfix_stack), %rax SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi swapgs /* to user GS */ popq %rdi /* Restore user RDI */ movq %rax, %rsp UNWIND_HINT_IRET_REGS offset=8 /* * At this point, we cannot write to the stack any more, but we can * still read. */ popq %rax /* Restore user RAX */ /* * RSP now points to an ordinary IRET frame, except that the page * is read-only and RSP[31:16] are preloaded with the userspace * values. We can now IRET back to userspace. 
*/ jmp native_irq_return_iret #endif SYM_CODE_END(common_interrupt_return) _ASM_NOKPROBE(common_interrupt_return) /* * Reload gs selector with exception handling * edi: new selector * * Is in entry.text as it shouldn't be instrumented. */ SYM_FUNC_START(asm_load_gs_index) FRAME_BEGIN swapgs .Lgs_change: ANNOTATE_NOENDBR // error_entry movl %edi, %gs 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE swapgs FRAME_END RET /* running with kernelgs */ .Lbad_gs: swapgs /* switch back to user gs */ .macro ZAP_GS /* This can't be a string because the preprocessor needs to see it. */ movl $__USER_DS, %eax movl %eax, %gs .endm ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG xorl %eax, %eax movl %eax, %gs jmp 2b _ASM_EXTABLE(.Lgs_change, .Lbad_gs) SYM_FUNC_END(asm_load_gs_index) EXPORT_SYMBOL(asm_load_gs_index) #ifdef CONFIG_XEN_PV /* * A note on the "critical region" in our callback handler. * We want to avoid stacking callback handlers due to events occurring * during handling of the last event. To do this, we keep events disabled * until we've done all processing. HOWEVER, we must enable events before * popping the stack frame (can't be done atomically) and so it would still * be possible to get enough handler activations to overflow the stack. * Although unlikely, bugs of that kind are hard to track down, so we'd * like to avoid the possibility. * So, on entry to the handler we detect whether we interrupted an * existing activation in its critical region -- if so, we pop the current * activation and restart the handler using the previous one. * * C calling convention: exc_xen_hypervisor_callback(struct *pt_regs) */ SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback) /* * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will * see the correct pointer to the pt_regs */ UNWIND_HINT_FUNC movq %rdi, %rsp /* we don't return, adjust the stack frame */ UNWIND_HINT_REGS call xen_pv_evtchn_do_upcall jmp error_return SYM_CODE_END(exc_xen_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. * We get here for two reasons: * 1. Fault while reloading DS, ES, FS or GS * 2. Fault while executing IRET * Category 1 we do not need to fix up as Xen has already reloaded all segment * registers that could be reloaded and zeroed the others. * Category 2 we fix up by killing the current process. We cannot use the * normal Linux return path in this case because if we use the IRET hypercall * to pop the stack frame we end up in an infinite loop of failsafe callbacks. * We distinguish between categories by comparing each saved segment register * with its current contents: any discrepancy means we in category 1. */ SYM_CODE_START(xen_failsafe_callback) UNWIND_HINT_EMPTY ENDBR movl %ds, %ecx cmpw %cx, 0x10(%rsp) jne 1f movl %es, %ecx cmpw %cx, 0x18(%rsp) jne 1f movl %fs, %ecx cmpw %cx, 0x20(%rsp) jne 1f movl %gs, %ecx cmpw %cx, 0x28(%rsp) jne 1f /* All segments match their saved values => Category 2 (Bad IRET). */ movq (%rsp), %rcx movq 8(%rsp), %r11 addq $0x30, %rsp pushq $0 /* RIP */ UNWIND_HINT_IRET_REGS offset=8 jmp asm_exc_general_protection 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ movq (%rsp), %rcx movq 8(%rsp), %r11 addq $0x30, %rsp UNWIND_HINT_IRET_REGS pushq $-1 /* orig_ax = -1 => not a system call */ PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER jmp error_return SYM_CODE_END(xen_failsafe_callback) #endif /* CONFIG_XEN_PV */ /* * Save all registers in pt_regs. 
Return GSBASE related information * in EBX depending on the availability of the FSGSBASE instructions: * * FSGSBASE R/EBX * N 0 -> SWAPGS on exit * 1 -> no SWAPGS on exit * * Y GSBASE value at entry, must be restored in paranoid_exit * * R14 - old CR3 * R15 - old SPEC_CTRL */ SYM_CODE_START_LOCAL(paranoid_entry) UNWIND_HINT_FUNC PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 /* * Always stash CR3 in %r14. This value will be restored, * verbatim, at exit. Needed if paranoid_entry interrupted * another entry that already switched to the user CR3 value * but has not yet returned to userspace. * * This is also why CS (stashed in the "iret frame" by the * hardware at entry) can not be used: this may be a return * to kernel code, but with a user CR3 value. * * Switching CR3 does not depend on kernel GSBASE so it can * be done before switching to the kernel GSBASE. This is * required for FSGSBASE because the kernel GSBASE has to * be retrieved from a kernel internal table. */ SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 /* * Handling GSBASE depends on the availability of FSGSBASE. * * Without FSGSBASE the kernel enforces that negative GSBASE * values indicate kernel GSBASE. With FSGSBASE no assumptions * can be made about the GSBASE value when entering from user * space. */ ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE /* * Read the current GSBASE and store it in %rbx unconditionally, * retrieve and set the current CPUs kernel GSBASE. The stored value * has to be restored in paranoid_exit unconditionally. * * The unconditional write to GS base below ensures that no subsequent * loads based on a mispredicted GS base can happen, therefore no LFENCE * is needed here. */ SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx jmp .Lparanoid_gsbase_done .Lparanoid_entry_checkgs: /* EBX = 1 -> kernel GSBASE active, no restore required */ movl $1, %ebx /* * The kernel-enforced convention is a negative GSBASE indicates * a kernel value. No SWAPGS needed on entry and exit. */ movl $MSR_GS_BASE, %ecx rdmsr testl %edx, %edx js .Lparanoid_kernel_gsbase /* EBX = 0 -> SWAPGS required on exit */ xorl %ebx, %ebx swapgs .Lparanoid_kernel_gsbase: FENCE_SWAPGS_KERNEL_ENTRY .Lparanoid_gsbase_done: /* * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like * CR3 above, keep the old value in a callee saved register. */ IBRS_ENTER save_reg=%r15 UNTRAIN_RET RET SYM_CODE_END(paranoid_entry) /* * "Paranoid" exit path from exception stack. This is invoked * only on return from non-NMI IST interrupts that came * from kernel space. * * We may be returning to very strange contexts (e.g. very early * in syscall entry), so checking for preemption here would * be complicated. Fortunately, there's no good reason to try * to handle preemption here. * * R/EBX contains the GSBASE related information depending on the * availability of the FSGSBASE instructions: * * FSGSBASE R/EBX * N 0 -> SWAPGS on exit * 1 -> no SWAPGS on exit * * Y User space GSBASE, must be restored unconditionally * * R14 - old CR3 * R15 - old SPEC_CTRL */ SYM_CODE_START_LOCAL(paranoid_exit) UNWIND_HINT_REGS /* * Must restore IBRS state before both CR3 and %GS since we need access * to the per-CPU x86_spec_ctrl_shadow variable. */ IBRS_EXIT save_reg=%r15 /* * The order of operations is important. RESTORE_CR3 requires * kernel GSBASE. * * NB to anyone to try to optimize this code: this code does * not execute at all for exceptions from user mode. Those * exceptions go through error_exit instead. 
*/ RESTORE_CR3 scratch_reg=%rax save_reg=%r14 /* Handle the three GSBASE cases */ ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE /* With FSGSBASE enabled, unconditionally restore GSBASE */ wrgsbase %rbx jmp restore_regs_and_return_to_kernel .Lparanoid_exit_checkgs: /* On non-FSGSBASE systems, conditionally do SWAPGS */ testl %ebx, %ebx jnz restore_regs_and_return_to_kernel /* We are returning to a context with user GSBASE */ swapgs jmp restore_regs_and_return_to_kernel SYM_CODE_END(paranoid_exit) /* * Switch GS and CR3 if needed. */ SYM_CODE_START_LOCAL(error_entry) UNWIND_HINT_FUNC PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 testb $3, CS+8(%rsp) jz .Lerror_kernelspace /* * We entered from user mode or we're pretending to have entered * from user mode due to an IRET fault. */ swapgs FENCE_SWAPGS_USER_ENTRY /* We have user CR3. Change to kernel CR3. */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax IBRS_ENTER UNTRAIN_RET leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */ .Lerror_entry_from_usermode_after_swapgs: /* Put us onto the real thread stack. */ call sync_regs RET /* * There are two places in the kernel that can potentially fault with * usergs. Handle them here. B stepping K8s sometimes report a * truncated RIP for IRET exceptions returning to compat mode. Check * for these here too. */ .Lerror_kernelspace: leaq native_irq_return_iret(%rip), %rcx cmpq %rcx, RIP+8(%rsp) je .Lerror_bad_iret movl %ecx, %eax /* zero extend */ cmpq %rax, RIP+8(%rsp) je .Lbstep_iret cmpq $.Lgs_change, RIP+8(%rsp) jne .Lerror_entry_done_lfence /* * hack: .Lgs_change can fail with user gsbase. If this happens, fix up * gsbase and proceed. We'll fix up the exception and land in * .Lgs_change's error handler with kernel gsbase. */ swapgs /* * Issue an LFENCE to prevent GS speculation, regardless of whether it is a * kernel or user gsbase. */ .Lerror_entry_done_lfence: FENCE_SWAPGS_KERNEL_ENTRY leaq 8(%rsp), %rax /* return pt_regs pointer */ ANNOTATE_UNRET_END RET .Lbstep_iret: /* Fix truncated RIP */ movq %rcx, RIP+8(%rsp) /* fall through */ .Lerror_bad_iret: /* * We came from an IRET to user mode, so we have user * gsbase and CR3. Switch to kernel gsbase and CR3: */ swapgs FENCE_SWAPGS_USER_ENTRY SWITCH_TO_KERNEL_CR3 scratch_reg=%rax IBRS_ENTER UNTRAIN_RET /* * Pretend that the exception came from user mode: set up pt_regs * as if we faulted immediately after IRET. */ leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */ call fixup_bad_iret mov %rax, %rdi jmp .Lerror_entry_from_usermode_after_swapgs SYM_CODE_END(error_entry) SYM_CODE_START_LOCAL(error_return) UNWIND_HINT_REGS DEBUG_ENTRY_ASSERT_IRQS_OFF testb $3, CS(%rsp) jz restore_regs_and_return_to_kernel jmp swapgs_restore_regs_and_return_to_usermode SYM_CODE_END(error_return) /* * Runs on exception stack. Xen PV does not go through this path at all, * so we can use real assembly here. * * Registers: * %r14: Used to save/restore the CR3 of the interrupted context * when PAGE_TABLE_ISOLATION is in use. Do not clobber. */ SYM_CODE_START(asm_exc_nmi) UNWIND_HINT_IRET_REGS ENDBR /* * We allow breakpoints in NMIs. If a breakpoint occurs, then * the iretq it performs will take us out of NMI context. * This means that we can have nested NMIs where the next * NMI is using the top of the stack of the previous NMI. We * can't let it execute because the nested NMI will corrupt the * stack of the previous NMI. NMI handlers are not re-entrant * anyway. 
* * To handle this case we do the following: * Check the a special location on the stack that contains * a variable that is set when NMIs are executing. * The interrupted task's stack is also checked to see if it * is an NMI stack. * If the variable is not set and the stack is not the NMI * stack then: * o Set the special variable on the stack * o Copy the interrupt frame into an "outermost" location on the * stack * o Copy the interrupt frame into an "iret" location on the stack * o Continue processing the NMI * If the variable is set or the previous stack is the NMI stack: * o Modify the "iret" location to jump to the repeat_nmi * o return back to the first NMI * * Now on exit of the first NMI, we first clear the stack variable * The NMI stack will tell any nested NMIs at that point that it is * nested. Then we pop the stack normally with iret, and if there was * a nested NMI that updated the copy interrupt stack frame, a * jump will be made to the repeat_nmi code that will handle the second * NMI. * * However, espfix prevents us from directly returning to userspace * with a single IRET instruction. Similarly, IRET to user mode * can fault. We therefore handle NMIs from user space like * other IST entries. */ ASM_CLAC cld /* Use %rdx as our temp variable throughout */ pushq %rdx testb $3, CS-RIP+8(%rsp) jz .Lnmi_from_kernel /* * NMI from user mode. We need to run on the thread stack, but we * can't go through the normal entry paths: NMIs are masked, and * we don't want to enable interrupts, because then we'll end * up in an awkward situation in which IRQs are on but NMIs * are off. * * We also must not push anything to the stack before switching * stacks lest we corrupt the "NMI executing" variable. */ swapgs FENCE_SWAPGS_USER_ENTRY SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx movq %rsp, %rdx movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp UNWIND_HINT_IRET_REGS base=%rdx offset=8 pushq 5*8(%rdx) /* pt_regs->ss */ pushq 4*8(%rdx) /* pt_regs->rsp */ pushq 3*8(%rdx) /* pt_regs->flags */ pushq 2*8(%rdx) /* pt_regs->cs */ pushq 1*8(%rdx) /* pt_regs->rip */ UNWIND_HINT_IRET_REGS pushq $-1 /* pt_regs->orig_ax */ PUSH_AND_CLEAR_REGS rdx=(%rdx) ENCODE_FRAME_POINTER IBRS_ENTER UNTRAIN_RET /* * At this point we no longer need to worry about stack damage * due to nesting -- we're on the normal thread stack and we're * done with the NMI stack. */ movq %rsp, %rdi movq $-1, %rsi call exc_nmi /* * Return back to user mode. We must *not* do the normal exit * work, because we don't want to enable interrupts. */ jmp swapgs_restore_regs_and_return_to_usermode .Lnmi_from_kernel: /* * Here's what our stack frame will look like: * +---------------------------------------------------------+ * | original SS | * | original Return RSP | * | original RFLAGS | * | original CS | * | original RIP | * +---------------------------------------------------------+ * | temp storage for rdx | * +---------------------------------------------------------+ * | "NMI executing" variable | * +---------------------------------------------------------+ * | iret SS } Copied from "outermost" frame | * | iret Return RSP } on each loop iteration; overwritten | * | iret RFLAGS } by a nested NMI to force another | * | iret CS } iteration if needed. | * | iret RIP } | * +---------------------------------------------------------+ * | outermost SS } initialized in first_nmi; | * | outermost Return RSP } will not be changed before | * | outermost RFLAGS } NMI processing is done. 
| * | outermost CS } Copied to "iret" frame on each * iteration. | * +---------------------------------------------------------+ * | pt_regs | * +---------------------------------------------------------+ * * The "original" frame is used by hardware. Before re-enabling * NMIs, we need to be done with it, and we need to leave enough * space for the asm code here. * * We return by executing IRET while RSP points to the "iret" frame. * That will either return for real or it will loop back into NMI * processing. * * The "outermost" frame is copied to the "iret" frame on each * iteration of the loop, so each iteration starts with the "iret" * frame pointing to the final return target. */ /* * Determine whether we're a nested NMI. * * If we interrupted kernel code between repeat_nmi and * end_repeat_nmi, then we are a nested NMI. We must not * modify the "iret" frame because it's being written by * the outer NMI. That's okay; the outer NMI handler is * about to call exc_nmi() anyway, so we can just * resume the outer NMI. */ movq $repeat_nmi, %rdx cmpq 8(%rsp), %rdx ja 1f movq $end_repeat_nmi, %rdx cmpq 8(%rsp), %rdx ja nested_nmi_out 1: /* * Now check "NMI executing". If it's set, then we're nested. * This will not detect if we interrupted an outer NMI just * before IRET. */ cmpl $1, -8(%rsp) je nested_nmi /* * Now test if the previous stack was an NMI stack. This covers * the case where we interrupt an outer NMI after it clears * "NMI executing" but before IRET. We need to be careful, though: * there is one case in which RSP could point to the NMI stack * despite there being no NMI active: naughty userspace controls * RSP at the very beginning of the SYSCALL targets. We can * pull a fast one on naughty userspace, though: we program * SYSCALL to mask DF, so userspace cannot cause DF to be set * if it controls the kernel's RSP. We set DF before we clear * "NMI executing". */ lea 6*8(%rsp), %rdx /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ cmpq %rdx, 4*8(%rsp) /* If the stack pointer is above the NMI stack, this is a normal NMI */ ja first_nmi subq $EXCEPTION_STKSZ, %rdx cmpq %rdx, 4*8(%rsp) /* If it is below the NMI stack, it is a normal NMI */ jb first_nmi /* Ah, it is within the NMI stack. */ testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) jz first_nmi /* RSP was user controlled. */ /* This is a nested NMI. */ nested_nmi: /* * Modify the "iret" frame to point to repeat_nmi, forcing another * iteration of NMI handling. */ subq $8, %rsp leaq -10*8(%rsp), %rdx pushq $__KERNEL_DS pushq %rdx pushfq pushq $__KERNEL_CS pushq $repeat_nmi /* Put stack back */ addq $(6*8), %rsp nested_nmi_out: popq %rdx /* We are returning to kernel mode, so this cannot result in a fault. */ iretq first_nmi: /* Restore rdx. */ movq (%rsp), %rdx /* Make room for "NMI executing". */ pushq $0 /* Leave room for the "iret" frame */ subq $(5*8), %rsp /* Copy the "original" frame to the "outermost" frame */ .rept 5 pushq 11*8(%rsp) .endr UNWIND_HINT_IRET_REGS /* Everything up to here is safe from nested NMIs */ #ifdef CONFIG_DEBUG_ENTRY /* * For ease of testing, unmask NMIs right away. Disabled by * default because IRET is very expensive.
*/ pushq $0 /* SS */ pushq %rsp /* RSP (minus 8 because of the previous push) */ addq $8, (%rsp) /* Fix up RSP */ pushfq /* RFLAGS */ pushq $__KERNEL_CS /* CS */ pushq $1f /* RIP */ iretq /* continues at repeat_nmi below */ UNWIND_HINT_IRET_REGS 1: #endif repeat_nmi: ANNOTATE_NOENDBR // this code /* * If there was a nested NMI, the first NMI's iret will return * here. But NMIs are still enabled and we can take another * nested NMI. The nested NMI checks the interrupted RIP to see * if it is between repeat_nmi and end_repeat_nmi, and if so * it will just return, as we are about to repeat an NMI anyway. * This makes it safe to copy to the stack frame that a nested * NMI will update. * * RSP is pointing to "outermost RIP". gsbase is unknown, but, if * we're repeating an NMI, gsbase has the same value that it had on * the first iteration. paranoid_entry will load the kernel * gsbase if needed before we call exc_nmi(). "NMI executing" * is zero. */ movq $1, 10*8(%rsp) /* Set "NMI executing". */ /* * Copy the "outermost" frame to the "iret" frame. NMIs that nest * here must not modify the "iret" frame while we're writing to * it or it will end up containing garbage. */ addq $(10*8), %rsp .rept 5 pushq -6*8(%rsp) .endr subq $(5*8), %rsp end_repeat_nmi: ANNOTATE_NOENDBR // this code /* * Everything below this point can be preempted by a nested NMI. * If this happens, then the inner NMI will change the "iret" * frame to point back to repeat_nmi. */ pushq $-1 /* ORIG_RAX: no syscall to restart */ /* * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit * as we should not be calling schedule in NMI context. * Even with normal interrupts enabled. An NMI should not be * setting NEED_RESCHED or anything that normal interrupts and * exceptions might do. */ call paranoid_entry UNWIND_HINT_REGS movq %rsp, %rdi movq $-1, %rsi call exc_nmi /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */ IBRS_EXIT save_reg=%r15 /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 /* * The above invocation of paranoid_entry stored the GSBASE * related information in R/EBX depending on the availability * of FSGSBASE. * * If FSGSBASE is enabled, restore the saved GSBASE value * unconditionally, otherwise take the conditional SWAPGS path. */ ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE wrgsbase %rbx jmp nmi_restore nmi_no_fsgsbase: /* EBX == 0 -> invoke SWAPGS */ testl %ebx, %ebx jnz nmi_restore nmi_swapgs: swapgs nmi_restore: POP_REGS /* * Skip orig_ax and the "outermost" frame to point RSP at the "iret" * frame. */ addq $6*8, %rsp /* * Clear "NMI executing". Set DF first so that we can easily * distinguish the remaining code between here and IRET from * the SYSCALL entry and exit paths. * * We arguably should just inspect RIP instead, but I (Andy) wrote * this code when I had the misapprehension that Xen PV supported * NMIs, and Xen PV would break that approach. */ std movq $0, 5*8(%rsp) /* clear "NMI executing" */ /* * iretq reads the "iret" frame and exits the NMI stack in a * single instruction. We are returning to kernel mode, so this * cannot result in a fault. Similarly, we don't need to worry * about espfix64 on the way back to kernel mode. */ iretq SYM_CODE_END(asm_exc_nmi) #ifndef CONFIG_IA32_EMULATION /* * This handles SYSCALL from 32-bit code. There is no way to program * MSRs to fully disable 32-bit SYSCALL.
*/ SYM_CODE_START(ignore_sysret) UNWIND_HINT_EMPTY ENDBR mov $-ENOSYS, %eax sysretl SYM_CODE_END(ignore_sysret) #endif .pushsection .text, "ax" SYM_CODE_START(rewind_stack_and_make_dead) UNWIND_HINT_FUNC /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp movq PER_CPU_VAR(cpu_current_top_of_stack), %rax leaq -PTREGS_SIZE(%rax), %rsp UNWIND_HINT_REGS call make_task_dead SYM_CODE_END(rewind_stack_and_make_dead) .popsection
aixcc-public/challenge-001-exemplar-source
33,441
arch/x86/entry/entry_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 1991,1992 Linus Torvalds * * entry_32.S contains the system-call and low-level fault and trap handling routines. * * Stack layout while running C code: * ptrace needs to have all registers on the stack. * If the order here is changed, it needs to be * updated in fork.c:copy_process(), signal.c:do_signal(), * ptrace.c and ptrace.h * * 0(%esp) - %ebx * 4(%esp) - %ecx * 8(%esp) - %edx * C(%esp) - %esi * 10(%esp) - %edi * 14(%esp) - %ebp * 18(%esp) - %eax * 1C(%esp) - %ds * 20(%esp) - %es * 24(%esp) - %fs * 28(%esp) - unused -- was %gs on old stackprotector kernels * 2C(%esp) - orig_eax * 30(%esp) - %eip * 34(%esp) - %cs * 38(%esp) - %eflags * 3C(%esp) - %oldesp * 40(%esp) - %oldss */ #include <linux/linkage.h> #include <linux/err.h> #include <asm/thread_info.h> #include <asm/irqflags.h> #include <asm/errno.h> #include <asm/segment.h> #include <asm/smp.h> #include <asm/percpu.h> #include <asm/processor-flags.h> #include <asm/irq_vectors.h> #include <asm/cpufeatures.h> #include <asm/alternative.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/frame.h> #include <asm/trapnr.h> #include <asm/nospec-branch.h> #include "calling.h" .section .entry.text, "ax" #define PTI_SWITCH_MASK (1 << PAGE_SHIFT) /* Unconditionally switch to user cr3 */ .macro SWITCH_TO_USER_CR3 scratch_reg:req ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI movl %cr3, \scratch_reg orl $PTI_SWITCH_MASK, \scratch_reg movl \scratch_reg, %cr3 .Lend_\@: .endm .macro BUG_IF_WRONG_CR3 no_user_check=0 #ifdef CONFIG_DEBUG_ENTRY ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI .if \no_user_check == 0 /* coming from usermode? */ testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp) jz .Lend_\@ .endif /* On user-cr3? */ movl %cr3, %eax testl $PTI_SWITCH_MASK, %eax jnz .Lend_\@ /* From userspace with kernel cr3 - BUG */ ud2 .Lend_\@: #endif .endm /* * Switch to kernel cr3 if not already loaded and return current cr3 in * \scratch_reg */ .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI movl %cr3, \scratch_reg /* Test if we are already on kernel CR3 */ testl $PTI_SWITCH_MASK, \scratch_reg jz .Lend_\@ andl $(~PTI_SWITCH_MASK), \scratch_reg movl \scratch_reg, %cr3 /* Return original CR3 in \scratch_reg */ orl $PTI_SWITCH_MASK, \scratch_reg .Lend_\@: .endm #define CS_FROM_ENTRY_STACK (1 << 31) #define CS_FROM_USER_CR3 (1 << 30) #define CS_FROM_KERNEL (1 << 29) #define CS_FROM_ESPFIX (1 << 28) .macro FIXUP_FRAME /* * The high bits of the CS dword (__csh) are used for CS_FROM_*. * Clear them in case hardware didn't do this for us. */ andl $0x0000ffff, 4*4(%esp) #ifdef CONFIG_VM86 testl $X86_EFLAGS_VM, 5*4(%esp) jnz .Lfrom_usermode_no_fixup_\@ #endif testl $USER_SEGMENT_RPL_MASK, 4*4(%esp) jnz .Lfrom_usermode_no_fixup_\@ orl $CS_FROM_KERNEL, 4*4(%esp) /* * When we're here from kernel mode; the (exception) stack looks like: * * 6*4(%esp) - <previous context> * 5*4(%esp) - flags * 4*4(%esp) - cs * 3*4(%esp) - ip * 2*4(%esp) - orig_eax * 1*4(%esp) - gs / function * 0*4(%esp) - fs * * Lets build a 5 entry IRET frame after that, such that struct pt_regs * is complete and in particular regs->sp is correct. 
This gives us * the original 6 entries as gap: * * 14*4(%esp) - <previous context> * 13*4(%esp) - gap / flags * 12*4(%esp) - gap / cs * 11*4(%esp) - gap / ip * 10*4(%esp) - gap / orig_eax * 9*4(%esp) - gap / gs / function * 8*4(%esp) - gap / fs * 7*4(%esp) - ss * 6*4(%esp) - sp * 5*4(%esp) - flags * 4*4(%esp) - cs * 3*4(%esp) - ip * 2*4(%esp) - orig_eax * 1*4(%esp) - gs / function * 0*4(%esp) - fs */ pushl %ss # ss pushl %esp # sp (points at ss) addl $7*4, (%esp) # point sp back at the previous context pushl 7*4(%esp) # flags pushl 7*4(%esp) # cs pushl 7*4(%esp) # ip pushl 7*4(%esp) # orig_eax pushl 7*4(%esp) # gs / function pushl 7*4(%esp) # fs .Lfrom_usermode_no_fixup_\@: .endm .macro IRET_FRAME /* * We're called with %ds, %es, %fs, and %gs from the interrupted * frame, so we shouldn't use them. Also, we may be in ESPFIX * mode and therefore have a nonzero SS base and an offset ESP, * so any attempt to access the stack needs to use SS. (except for * accesses through %esp, which automatically use SS.) */ testl $CS_FROM_KERNEL, 1*4(%esp) jz .Lfinished_frame_\@ /* * Reconstruct the 3 entry IRET frame right after the (modified) * regs->sp without lowering %esp in between, such that an NMI in the * middle doesn't scribble our stack. */ pushl %eax pushl %ecx movl 5*4(%esp), %eax # (modified) regs->sp movl 4*4(%esp), %ecx # flags movl %ecx, %ss:-1*4(%eax) movl 3*4(%esp), %ecx # cs andl $0x0000ffff, %ecx movl %ecx, %ss:-2*4(%eax) movl 2*4(%esp), %ecx # ip movl %ecx, %ss:-3*4(%eax) movl 1*4(%esp), %ecx # eax movl %ecx, %ss:-4*4(%eax) popl %ecx lea -4*4(%eax), %esp popl %eax .Lfinished_frame_\@: .endm .macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0 cld .if \skip_gs == 0 pushl $0 .endif pushl %fs pushl %eax movl $(__KERNEL_PERCPU), %eax movl %eax, %fs .if \unwind_espfix > 0 UNWIND_ESPFIX_STACK .endif popl %eax FIXUP_FRAME pushl %es pushl %ds pushl \pt_regs_ax pushl %ebp pushl %edi pushl %esi pushl %edx pushl %ecx pushl %ebx movl $(__USER_DS), %edx movl %edx, %ds movl %edx, %es /* Switch to kernel stack if necessary */ .if \switch_stacks > 0 SWITCH_TO_KERNEL_STACK .endif .endm .macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0 SAVE_ALL unwind_espfix=\unwind_espfix BUG_IF_WRONG_CR3 /* * Now switch the CR3 when PTI is enabled. * * We can enter with either user or kernel cr3, the code will * store the old cr3 in \cr3_reg and switches to the kernel cr3 * if necessary. */ SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg .Lend_\@: .endm .macro RESTORE_INT_REGS popl %ebx popl %ecx popl %edx popl %esi popl %edi popl %ebp popl %eax .endm .macro RESTORE_REGS pop=0 RESTORE_INT_REGS 1: popl %ds 2: popl %es 3: popl %fs 4: addl $(4 + \pop), %esp /* pop the unused "gs" slot */ IRET_FRAME /* * There is no _ASM_EXTABLE_TYPE_REG() for ASM, however since this is * ASM the registers are known and we can trivially hard-code them. */ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_POP_ZERO|EX_REG_DS) _ASM_EXTABLE_TYPE(2b, 3b, EX_TYPE_POP_ZERO|EX_REG_ES) _ASM_EXTABLE_TYPE(3b, 4b, EX_TYPE_POP_ZERO|EX_REG_FS) .endm .macro RESTORE_ALL_NMI cr3_reg:req pop=0 /* * Now switch the CR3 when PTI is enabled. * * We enter with kernel cr3 and switch the cr3 to the value * stored on \cr3_reg, which is either a user or a kernel cr3. 
*/ ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI testl $PTI_SWITCH_MASK, \cr3_reg jz .Lswitched_\@ /* User cr3 in \cr3_reg - write it to hardware cr3 */ movl \cr3_reg, %cr3 .Lswitched_\@: BUG_IF_WRONG_CR3 RESTORE_REGS pop=\pop .endm .macro CHECK_AND_APPLY_ESPFIX #ifdef CONFIG_X86_ESPFIX32 #define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8) #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS /* * Warning: PT_OLDSS(%esp) contains the wrong/random values if we * are returning to the kernel. * See comments in process.c:copy_thread() for details. */ movb PT_OLDSS(%esp), %ah movb PT_CS(%esp), %al andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax jne .Lend_\@ # returning to user-space with LDT SS /* * Setup and switch to ESPFIX stack * * We're returning to userspace with a 16 bit stack. The CPU will not * restore the high word of ESP for us on executing iret... This is an * "official" bug of all the x86-compatible CPUs, which we can work * around to make dosemu and wine happy. We do this by preloading the * high word of ESP with the high word of the userspace ESP while * compensating for the offset by changing to the ESPFIX segment with * a base address that matches for the difference. */ mov %esp, %edx /* load kernel esp */ mov PT_OLDESP(%esp), %eax /* load userspace esp */ mov %dx, %ax /* eax: new kernel esp */ sub %eax, %edx /* offset (low word is 0) */ shr $16, %edx mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ pushl $__ESPFIX_SS pushl %eax /* new kernel esp */ /* * Disable interrupts, but do not irqtrace this section: we * will soon execute iret and the tracer was already set to * the irqstate after the IRET: */ cli lss (%esp), %esp /* switch to espfix segment */ .Lend_\@: #endif /* CONFIG_X86_ESPFIX32 */ .endm /* * Called with pt_regs fully populated and kernel segments loaded, * so we can access PER_CPU and use the integer registers. * * We need to be very careful here with the %esp switch, because an NMI * can happen everywhere. If the NMI handler finds itself on the * entry-stack, it will overwrite the task-stack and everything we * copied there. So allocate the stack-frame on the task-stack and * switch to it before we do any copying. */ .macro SWITCH_TO_KERNEL_STACK BUG_IF_WRONG_CR3 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax /* * %eax now contains the entry cr3 and we carry it forward in * that register for the time this macro runs */ /* Are we on the entry stack? Bail out if not! 
*/ movl PER_CPU_VAR(cpu_entry_area), %ecx addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx subl %esp, %ecx /* ecx = (end of entry_stack) - esp */ cmpl $SIZEOF_entry_stack, %ecx jae .Lend_\@ /* Load stack pointer into %esi and %edi */ movl %esp, %esi movl %esi, %edi /* Move %edi to the top of the entry stack */ andl $(MASK_entry_stack), %edi addl $(SIZEOF_entry_stack), %edi /* Load top of task-stack into %edi */ movl TSS_entry2task_stack(%edi), %edi /* Special case - entry from kernel mode via entry stack */ #ifdef CONFIG_VM86 movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS movb PT_CS(%esp), %cl andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx #else movl PT_CS(%esp), %ecx andl $SEGMENT_RPL_MASK, %ecx #endif cmpl $USER_RPL, %ecx jb .Lentry_from_kernel_\@ /* Bytes to copy */ movl $PTREGS_SIZE, %ecx #ifdef CONFIG_VM86 testl $X86_EFLAGS_VM, PT_EFLAGS(%esi) jz .Lcopy_pt_regs_\@ /* * Stack-frame contains 4 additional segment registers when * coming from VM86 mode */ addl $(4 * 4), %ecx #endif .Lcopy_pt_regs_\@: /* Allocate frame on task-stack */ subl %ecx, %edi /* Switch to task-stack */ movl %edi, %esp /* * We are now on the task-stack and can safely copy over the * stack-frame */ shrl $2, %ecx cld rep movsl jmp .Lend_\@ .Lentry_from_kernel_\@: /* * This handles the case when we enter the kernel from * kernel-mode and %esp points to the entry-stack. When this * happens we need to switch to the task-stack to run C code, * but switch back to the entry-stack again when we approach * iret and return to the interrupted code-path. This usually * happens when we hit an exception while restoring user-space * segment registers on the way back to user-space or when the * sysenter handler runs with eflags.tf set. * * When we switch to the task-stack here, we can't trust the * contents of the entry-stack anymore, as the exception handler * might be scheduled out or moved to another CPU. Therefore we * copy the complete entry-stack to the task-stack and set a * marker in the iret-frame (bit 31 of the CS dword) to detect * what we've done on the iret path. * * On the iret path we copy everything back and switch to the * entry-stack, so that the interrupted kernel code-path * continues on the same stack it was interrupted with. * * Be aware that an NMI can happen anytime in this code. * * %esi: Entry-Stack pointer (same as %esp) * %edi: Top of the task stack * %eax: CR3 on kernel entry */ /* Calculate number of bytes on the entry stack in %ecx */ movl %esi, %ecx /* %ecx to the top of entry-stack */ andl $(MASK_entry_stack), %ecx addl $(SIZEOF_entry_stack), %ecx /* Number of bytes on the entry stack to %ecx */ sub %esi, %ecx /* Mark stackframe as coming from entry stack */ orl $CS_FROM_ENTRY_STACK, PT_CS(%esp) /* * Test the cr3 used to enter the kernel and add a marker * so that we can switch back to it before iret. */ testl $PTI_SWITCH_MASK, %eax jz .Lcopy_pt_regs_\@ orl $CS_FROM_USER_CR3, PT_CS(%esp) /* * %esi and %edi are unchanged, %ecx contains the number of * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate * the stack-frame on task-stack and copy everything over */ jmp .Lcopy_pt_regs_\@ .Lend_\@: .endm /* * Switch back from the kernel stack to the entry stack. * * The %esp register must point to pt_regs on the task stack. It will * first calculate the size of the stack-frame to copy, depending on * whether we return to VM86 mode or not. With that it uses 'rep movsl' * to copy the contents of the stack over to the entry stack. 
* * We must be very careful here, as we can't trust the contents of the * task-stack once we switched to the entry-stack. When an NMI happens * while on the entry-stack, the NMI handler will switch back to the top * of the task stack, overwriting our stack-frame we are about to copy. * Therefore we switch the stack only after everything is copied over. */ .macro SWITCH_TO_ENTRY_STACK /* Bytes to copy */ movl $PTREGS_SIZE, %ecx #ifdef CONFIG_VM86 testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp) jz .Lcopy_pt_regs_\@ /* Additional 4 registers to copy when returning to VM86 mode */ addl $(4 * 4), %ecx .Lcopy_pt_regs_\@: #endif /* Initialize source and destination for movsl */ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi subl %ecx, %edi movl %esp, %esi /* Save future stack pointer in %ebx */ movl %edi, %ebx /* Copy over the stack-frame */ shrl $2, %ecx cld rep movsl /* * Switch to entry-stack - needs to happen after everything is * copied because the NMI handler will overwrite the task-stack * when on entry-stack */ movl %ebx, %esp .Lend_\@: .endm /* * This macro handles the case when we return to kernel-mode on the iret * path and have to switch back to the entry stack and/or user-cr3 * * See the comments below the .Lentry_from_kernel_\@ label in the * SWITCH_TO_KERNEL_STACK macro for more details. */ .macro PARANOID_EXIT_TO_KERNEL_MODE /* * Test if we entered the kernel with the entry-stack. Most * likely we did not, because this code only runs on the * return-to-kernel path. */ testl $CS_FROM_ENTRY_STACK, PT_CS(%esp) jz .Lend_\@ /* Unlikely slow-path */ /* Clear marker from stack-frame */ andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp) /* Copy the remaining task-stack contents to entry-stack */ movl %esp, %esi movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi /* Bytes on the task-stack to ecx */ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx subl %esi, %ecx /* Allocate stack-frame on entry-stack */ subl %ecx, %edi /* * Save future stack-pointer, we must not switch until the * copy is done, otherwise the NMI handler could destroy the * contents of the task-stack we are about to copy. */ movl %edi, %ebx /* Do the copy */ shrl $2, %ecx cld rep movsl /* Safe to switch to entry-stack now */ movl %ebx, %esp /* * We came from entry-stack and need to check if we also need to * switch back to user cr3. 
*/ testl $CS_FROM_USER_CR3, PT_CS(%esp) jz .Lend_\@ /* Clear marker from stack-frame */ andl $(~CS_FROM_USER_CR3), PT_CS(%esp) SWITCH_TO_USER_CR3 scratch_reg=%eax .Lend_\@: .endm /** * idtentry - Macro to generate entry stubs for simple IDT entries * @vector: Vector number * @asmsym: ASM symbol for the entry point * @cfunc: C function to be called * @has_error_code: Hardware pushed error code on stack */ .macro idtentry vector asmsym cfunc has_error_code:req SYM_CODE_START(\asmsym) ASM_CLAC cld .if \has_error_code == 0 pushl $0 /* Clear the error code */ .endif /* Push the C-function address into the GS slot */ pushl $\cfunc /* Invoke the common exception entry */ jmp handle_exception SYM_CODE_END(\asmsym) .endm .macro idtentry_irq vector cfunc .p2align CONFIG_X86_L1_CACHE_SHIFT SYM_CODE_START_LOCAL(asm_\cfunc) ASM_CLAC SAVE_ALL switch_stacks=1 ENCODE_FRAME_POINTER movl %esp, %eax movl PT_ORIG_EAX(%esp), %edx /* get the vector from stack */ movl $-1, PT_ORIG_EAX(%esp) /* no syscall to restart */ call \cfunc jmp handle_exception_return SYM_CODE_END(asm_\cfunc) .endm .macro idtentry_sysvec vector cfunc idtentry \vector asm_\cfunc \cfunc has_error_code=0 .endm /* * Include the defines which emit the idt entries which are shared * shared between 32 and 64 bit and emit the __irqentry_text_* markers * so the stacktrace boundary checks work. */ .align 16 .globl __irqentry_text_start __irqentry_text_start: #include <asm/idtentry.h> .align 16 .globl __irqentry_text_end __irqentry_text_end: /* * %eax: prev task * %edx: next task */ .pushsection .text, "ax" SYM_CODE_START(__switch_to_asm) /* * Save callee-saved registers * This must match the order in struct inactive_task_frame */ pushl %ebp pushl %ebx pushl %edi pushl %esi /* * Flags are saved to prevent AC leakage. This could go * away if objtool would have 32bit support to verify * the STAC/CLAC correctness. */ pushfl /* switch stack */ movl %esp, TASK_threadsp(%eax) movl TASK_threadsp(%edx), %esp #ifdef CONFIG_STACKPROTECTOR movl TASK_stack_canary(%edx), %ebx movl %ebx, PER_CPU_VAR(__stack_chk_guard) #endif /* * When switching from a shallower to a deeper call stack * the RSB may either underflow or use entries populated * with userspace addresses. On CPUs where those concerns * exist, overwrite the RSB with entries which capture * speculative execution to prevent attack. */ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW /* Restore flags or the incoming task to restore AC state. */ popfl /* restore callee-saved registers */ popl %esi popl %edi popl %ebx popl %ebp jmp __switch_to SYM_CODE_END(__switch_to_asm) .popsection /* * The unwinder expects the last frame on the stack to always be at the same * offset from the end of the page, which allows it to validate the stack. * Calling schedule_tail() directly would break that convention because its an * asmlinkage function so its argument has to be pushed on the stack. This * wrapper creates a proper "end of stack" frame header before the call. */ .pushsection .text, "ax" SYM_FUNC_START(schedule_tail_wrapper) FRAME_BEGIN pushl %eax call schedule_tail popl %eax FRAME_END RET SYM_FUNC_END(schedule_tail_wrapper) .popsection /* * A newly forked process directly context switches into this address. 
* * eax: prev task we switched from * ebx: kernel thread func (NULL for user thread) * edi: kernel thread arg */ .pushsection .text, "ax" SYM_CODE_START(ret_from_fork) call schedule_tail_wrapper testl %ebx, %ebx jnz 1f /* kernel threads are uncommon */ 2: /* When we fork, we trace the syscall return in the child, too. */ movl %esp, %eax call syscall_exit_to_user_mode jmp .Lsyscall_32_done /* kernel thread */ 1: movl %edi, %eax CALL_NOSPEC ebx /* * A kernel thread is allowed to return here after successfully * calling kernel_execve(). Exit to userspace to complete the execve() * syscall. */ movl $0, PT_EAX(%esp) jmp 2b SYM_CODE_END(ret_from_fork) .popsection SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) /* * All code from here through __end_SYSENTER_singlestep_region is subject * to being single-stepped if a user program sets TF and executes SYSENTER. * There is absolutely nothing that we can do to prevent this from happening * (thanks Intel!). To keep our handling of this situation as simple as * possible, we handle TF just like AC and NT, except that our #DB handler * will ignore all of the single-step traps generated in this range. */ /* * 32-bit SYSENTER entry. * * 32-bit system calls through the vDSO's __kernel_vsyscall enter here * if X86_FEATURE_SEP is available. This is the preferred system call * entry on 32-bit systems. * * The SYSENTER instruction, in principle, should *only* occur in the * vDSO. In practice, a small number of Android devices were shipped * with a copy of Bionic that inlined a SYSENTER instruction. This * never happened in any of Google's Bionic versions -- it only happened * in a narrow range of Intel-provided versions. * * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs. * IF and VM in RFLAGS are cleared (IOW: interrupts are off). * SYSENTER does not save anything on the stack, * and does not save old EIP (!!!), ESP, or EFLAGS. * * To avoid losing track of EFLAGS.VM (and thus potentially corrupting * user and/or vm86 state), we explicitly disable the SYSENTER * instruction in vm86 mode by reprogramming the MSRs. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp user stack * 0(%ebp) arg6 */ SYM_FUNC_START(entry_SYSENTER_32) /* * On entry-stack with all userspace-regs live - save and * restore eflags and %eax to use it as scratch-reg for the cr3 * switch. */ pushfl pushl %eax BUG_IF_WRONG_CR3 no_user_check=1 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax popl %eax popfl /* Stack empty again, switch to task stack */ movl TSS_entry2task_stack(%esp), %esp .Lsysenter_past_esp: pushl $__USER_DS /* pt_regs->ss */ pushl $0 /* pt_regs->sp (placeholder) */ pushfl /* pt_regs->flags (except IF = 0) */ pushl $__USER_CS /* pt_regs->cs */ pushl $0 /* pt_regs->ip = 0 (placeholder) */ pushl %eax /* pt_regs->orig_ax */ SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */ /* * SYSENTER doesn't filter flags, so we need to clear NT, AC * and TF ourselves. To save a few cycles, we can check whether * either was set instead of doing an unconditional popfq. * This needs to happen before enabling interrupts so that * we don't get preempted with NT set. * * If TF is set, we will single-step all the way to here -- do_debug * will ignore all the traps. (Yes, this is slow, but so is * single-stepping in general. This allows us to avoid having * a more complicated code to handle the case where a user program * forces us to single-step through the SYSENTER entry code.) 
* * NB.: .Lsysenter_fix_flags is a label with the code under it moved * out-of-line as an optimization: NT is unlikely to be set in the * majority of the cases and instead of polluting the I$ unnecessarily, * we're keeping that code behind a branch which will predict as * not-taken and therefore its instructions won't be fetched. */ testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp) jnz .Lsysenter_fix_flags .Lsysenter_flags_fixed: movl %esp, %eax call do_SYSENTER_32 testl %eax, %eax jz .Lsyscall_32_done STACKLEAK_ERASE /* Opportunistic SYSEXIT */ /* * Setup entry stack - we keep the pointer in %eax and do the * switch after almost all user-state is restored. */ /* Load entry stack pointer and allocate frame for eflags/eax */ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax subl $(2*4), %eax /* Copy eflags and eax to entry stack */ movl PT_EFLAGS(%esp), %edi movl PT_EAX(%esp), %esi movl %edi, (%eax) movl %esi, 4(%eax) /* Restore user registers and segments */ movl PT_EIP(%esp), %edx /* pt_regs->ip */ movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */ 1: mov PT_FS(%esp), %fs popl %ebx /* pt_regs->bx */ addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */ popl %esi /* pt_regs->si */ popl %edi /* pt_regs->di */ popl %ebp /* pt_regs->bp */ /* Switch to entry stack */ movl %eax, %esp /* Now ready to switch the cr3 */ SWITCH_TO_USER_CR3 scratch_reg=%eax /* * Restore all flags except IF. (We restore IF separately because * STI gives a one-instruction window in which we won't be interrupted, * whereas POPF does not.) */ btrl $X86_EFLAGS_IF_BIT, (%esp) BUG_IF_WRONG_CR3 no_user_check=1 popfl popl %eax /* * Return back to the vDSO, which will pop ecx and edx. * Don't bother with DS and ES (they already contain __USER_DS). */ sti sysexit 2: movl $0, PT_FS(%esp) jmp 1b _ASM_EXTABLE(1b, 2b) .Lsysenter_fix_flags: pushl $X86_EFLAGS_FIXED popfl jmp .Lsysenter_flags_fixed SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) SYM_FUNC_END(entry_SYSENTER_32) /* * 32-bit legacy system call entry. * * 32-bit x86 Linux system calls traditionally used the INT $0x80 * instruction. INT $0x80 lands here. * * This entry point can be used by any 32-bit perform system calls. * Instances of INT $0x80 can be found inline in various programs and * libraries. It is also used by the vDSO's __kernel_vsyscall * fallback for hardware that doesn't support a faster entry method. * Restarted 32-bit system calls also fall back to INT $0x80 * regardless of what instruction was originally used to do the system * call. (64-bit programs can use INT $0x80 as well, but they can * only run on 64-bit kernels and therefore land in * entry_INT80_compat.) * * This is considered a slow path. It is not used by most libc * implementations on modern hardware except during process startup. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp arg6 */ SYM_FUNC_START(entry_INT80_32) ASM_CLAC pushl %eax /* pt_regs->orig_ax */ SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */ movl %esp, %eax call do_int80_syscall_32 .Lsyscall_32_done: STACKLEAK_ERASE restore_all_switch_stack: SWITCH_TO_ENTRY_STACK CHECK_AND_APPLY_ESPFIX /* Switch back to user CR3 */ SWITCH_TO_USER_CR3 scratch_reg=%eax BUG_IF_WRONG_CR3 /* Restore user state */ RESTORE_REGS pop=4 # skip orig_eax/error_code .Lirq_return: /* * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization * when returning from IPI handler and when returning from * scheduler to user-space. 
*/ iret .Lasm_iret_error: pushl $0 # no error code pushl $iret_error #ifdef CONFIG_DEBUG_ENTRY /* * The stack-frame here is the one that iret faulted on, so its a * return-to-user frame. We are on kernel-cr3 because we come here from * the fixup code. This confuses the CR3 checker, so switch to user-cr3 * as the checker expects it. */ pushl %eax SWITCH_TO_USER_CR3 scratch_reg=%eax popl %eax #endif jmp handle_exception _ASM_EXTABLE(.Lirq_return, .Lasm_iret_error) SYM_FUNC_END(entry_INT80_32) .macro FIXUP_ESPFIX_STACK /* * Switch back for ESPFIX stack to the normal zerobased stack * * We can't call C functions using the ESPFIX stack. This code reads * the high word of the segment base from the GDT and swiches to the * normal stack and adjusts ESP with the matching offset. * * We might be on user CR3 here, so percpu data is not mapped and we can't * access the GDT through the percpu segment. Instead, use SGDT to find * the cpu_entry_area alias of the GDT. */ #ifdef CONFIG_X86_ESPFIX32 /* fixup the stack */ pushl %ecx subl $2*4, %esp sgdt (%esp) movl 2(%esp), %ecx /* GDT address */ /* * Careful: ECX is a linear pointer, so we need to force base * zero. %cs is the only known-linear segment we have right now. */ mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */ mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */ shl $16, %eax addl $2*4, %esp popl %ecx addl %esp, %eax /* the adjusted stack pointer */ pushl $__KERNEL_DS pushl %eax lss (%esp), %esp /* switch to the normal stack segment */ #endif .endm .macro UNWIND_ESPFIX_STACK /* It's safe to clobber %eax, all other regs need to be preserved */ #ifdef CONFIG_X86_ESPFIX32 movl %ss, %eax /* see if on espfix stack */ cmpw $__ESPFIX_SS, %ax jne .Lno_fixup_\@ /* switch to normal stack */ FIXUP_ESPFIX_STACK .Lno_fixup_\@: #endif .endm SYM_CODE_START_LOCAL_NOALIGN(handle_exception) /* the function address is in %gs's slot on the stack */ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 ENCODE_FRAME_POINTER movl PT_GS(%esp), %edi # get the function address /* fixup orig %eax */ movl PT_ORIG_EAX(%esp), %edx # get the error code movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart movl %esp, %eax # pt_regs pointer CALL_NOSPEC edi handle_exception_return: #ifdef CONFIG_VM86 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS movb PT_CS(%esp), %al andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax #else /* * We can be coming here from child spawned by kernel_thread(). */ movl PT_CS(%esp), %eax andl $SEGMENT_RPL_MASK, %eax #endif cmpl $USER_RPL, %eax # returning to v8086 or userspace ? jnb ret_to_user PARANOID_EXIT_TO_KERNEL_MODE BUG_IF_WRONG_CR3 RESTORE_REGS 4 jmp .Lirq_return ret_to_user: movl %esp, %eax jmp restore_all_switch_stack SYM_CODE_END(handle_exception) SYM_CODE_START(asm_exc_double_fault) 1: /* * This is a task gate handler, not an interrupt gate handler. * The error code is on the stack, but the stack is otherwise * empty. Interrupts are off. Our state is sane with the following * exceptions: * * - CR0.TS is set. "TS" literally means "task switched". * - EFLAGS.NT is set because we're a "nested task". * - The doublefault TSS has back_link set and has been marked busy. * - TR points to the doublefault TSS and the normal TSS is busy. * - CR3 is the normal kernel PGD. This would be delightful, except * that the CPU didn't bother to save the old CR3 anywhere. This * would make it very awkward to return back to the context we came * from. * * The rest of EFLAGS is sanitized for us, so we don't need to * worry about AC or DF. 
* * Don't even bother popping the error code. It's always zero, * and ignoring it makes us a bit more robust against buggy * hypervisor task gate implementations. * * We will manually undo the task switch instead of doing a * task-switching IRET. */ clts /* clear CR0.TS */ pushl $X86_EFLAGS_FIXED popfl /* clear EFLAGS.NT */ call doublefault_shim /* We don't support returning, so we have no IRET here. */ 1: hlt jmp 1b SYM_CODE_END(asm_exc_double_fault) /* * NMI is doubly nasty. It can happen on the first instruction of * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32 * switched stacks. We handle both conditions by simply checking whether we * interrupted kernel code running on the SYSENTER stack. */ SYM_CODE_START(asm_exc_nmi) ASM_CLAC #ifdef CONFIG_X86_ESPFIX32 /* * ESPFIX_SS is only ever set on the return to user path * after we've switched to the entry stack. */ pushl %eax movl %ss, %eax cmpw $__ESPFIX_SS, %ax popl %eax je .Lnmi_espfix_stack #endif pushl %eax # pt_regs->orig_ax SAVE_ALL_NMI cr3_reg=%edi ENCODE_FRAME_POINTER xorl %edx, %edx # zero error code movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ cmpl $SIZEOF_entry_stack, %ecx jb .Lnmi_from_sysenter_stack /* Not on SYSENTER stack. */ call exc_nmi jmp .Lnmi_return .Lnmi_from_sysenter_stack: /* * We're on the SYSENTER stack. Switch off. No one (not even debug) * is using the thread stack right now, so it's safe for us to use it. */ movl %esp, %ebx movl PER_CPU_VAR(cpu_current_top_of_stack), %esp call exc_nmi movl %ebx, %esp .Lnmi_return: #ifdef CONFIG_X86_ESPFIX32 testl $CS_FROM_ESPFIX, PT_CS(%esp) jnz .Lnmi_from_espfix #endif CHECK_AND_APPLY_ESPFIX RESTORE_ALL_NMI cr3_reg=%edi pop=4 jmp .Lirq_return #ifdef CONFIG_X86_ESPFIX32 .Lnmi_espfix_stack: /* * Create the pointer to LSS back */ pushl %ss pushl %esp addl $4, (%esp) /* Copy the (short) IRET frame */ pushl 4*4(%esp) # flags pushl 4*4(%esp) # cs pushl 4*4(%esp) # ip pushl %eax # orig_ax SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1 ENCODE_FRAME_POINTER /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */ xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp) xorl %edx, %edx # zero error code movl %esp, %eax # pt_regs pointer jmp .Lnmi_from_sysenter_stack .Lnmi_from_espfix: RESTORE_ALL_NMI cr3_reg=%edi /* * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to * fix up the gap and long frame: * * 3 - original frame (exception) * 2 - ESPFIX block (above) * 6 - gap (FIXUP_FRAME) * 5 - long frame (FIXUP_FRAME) * 1 - orig_ax */ lss (1+5+6)*4(%esp), %esp # back to espfix stack jmp .Lirq_return #endif SYM_CODE_END(asm_exc_nmi) .pushsection .text, "ax" SYM_CODE_START(rewind_stack_and_make_dead) /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp movl PER_CPU_VAR(cpu_current_top_of_stack), %esi leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp call make_task_dead 1: jmp 1b SYM_CODE_END(rewind_stack_and_make_dead) .popsection
aixcc-public/challenge-001-exemplar-source
11,203
arch/x86/entry/entry_64_compat.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Compatibility mode system call entry point for x86-64. * * Copyright 2000-2002 Andi Kleen, SuSE Labs. */ #include <asm/asm-offsets.h> #include <asm/current.h> #include <asm/errno.h> #include <asm/ia32_unistd.h> #include <asm/thread_info.h> #include <asm/segment.h> #include <asm/irqflags.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/nospec-branch.h> #include <linux/linkage.h> #include <linux/err.h> #include "calling.h" .section .entry.text, "ax" /* * 32-bit SYSENTER entry. * * 32-bit system calls through the vDSO's __kernel_vsyscall enter here * on 64-bit kernels running on Intel CPUs. * * The SYSENTER instruction, in principle, should *only* occur in the * vDSO. In practice, a small number of Android devices were shipped * with a copy of Bionic that inlined a SYSENTER instruction. This * never happened in any of Google's Bionic versions -- it only happened * in a narrow range of Intel-provided versions. * * SYSENTER loads SS, RSP, CS, and RIP from previously programmed MSRs. * IF and VM in RFLAGS are cleared (IOW: interrupts are off). * SYSENTER does not save anything on the stack, * and does not save old RIP (!!!), RSP, or RFLAGS. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp user stack * 0(%ebp) arg6 */ SYM_CODE_START(entry_SYSENTER_compat) UNWIND_HINT_ENTRY ENDBR /* Interrupts are off on entry. */ swapgs pushq %rax SWITCH_TO_KERNEL_CR3 scratch_reg=%rax popq %rax movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* Construct struct pt_regs on stack */ pushq $__USER32_DS /* pt_regs->ss */ pushq $0 /* pt_regs->sp = 0 (placeholder) */ /* * Push flags. This is nasty. First, interrupts are currently * off, but we need pt_regs->flags to have IF set. Second, if TS * was set in usermode, it's still set, and we're singlestepping * through this code. do_SYSENTER_32() will fix up IF. */ pushfq /* pt_regs->flags (except IF = 0) */ pushq $__USER32_CS /* pt_regs->cs */ pushq $0 /* pt_regs->ip = 0 (placeholder) */ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL) /* * User tracing code (ptrace or signal handlers) might assume that * the saved RAX contains a 32-bit number when we're invoking a 32-bit * syscall. Just in case the high bits are nonzero, zero-extend * the syscall number. (This could almost certainly be deleted * with no ill effects.) */ movl %eax, %eax pushq %rax /* pt_regs->orig_ax */ PUSH_AND_CLEAR_REGS rax=$-ENOSYS UNWIND_HINT_REGS cld IBRS_ENTER UNTRAIN_RET /* * SYSENTER doesn't filter flags, so we need to clear NT and AC * ourselves. To save a few cycles, we can check whether * either was set instead of doing an unconditional popfq. * This needs to happen before enabling interrupts so that * we don't get preempted with NT set. * * If TF is set, we will single-step all the way to here -- do_debug * will ignore all the traps. (Yes, this is slow, but so is * single-stepping in general. This allows us to avoid having * a more complicated code to handle the case where a user program * forces us to single-step through the SYSENTER entry code.) * * NB.: .Lsysenter_fix_flags is a label with the code under it moved * out-of-line as an optimization: NT is unlikely to be set in the * majority of the cases and instead of polluting the I$ unnecessarily, * we're keeping that code behind a branch which will predict as * not-taken and therefore its instructions won't be fetched. 
*/ testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, EFLAGS(%rsp) jnz .Lsysenter_fix_flags .Lsysenter_flags_fixed: movq %rsp, %rdi call do_SYSENTER_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \ "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV jmp sysret32_from_system_call .Lsysenter_fix_flags: pushq $X86_EFLAGS_FIXED popfq jmp .Lsysenter_flags_fixed SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL) ANNOTATE_NOENDBR // is_sysenter_singlestep SYM_CODE_END(entry_SYSENTER_compat) /* * 32-bit SYSCALL entry. * * 32-bit system calls through the vDSO's __kernel_vsyscall enter here * on 64-bit kernels running on AMD CPUs. * * The SYSCALL instruction, in principle, should *only* occur in the * vDSO. In practice, it appears that this really is the case. * As evidence: * * - The calling convention for SYSCALL has changed several times without * anyone noticing. * * - Prior to the in-kernel X86_BUG_SYSRET_SS_ATTRS fixup, anything * user task that did SYSCALL without immediately reloading SS * would randomly crash. * * - Most programmers do not directly target AMD CPUs, and the 32-bit * SYSCALL instruction does not exist on Intel CPUs. Even on AMD * CPUs, Linux disables the SYSCALL instruction on 32-bit kernels * because the SYSCALL instruction in legacy/native 32-bit mode (as * opposed to compat mode) is sufficiently poorly designed as to be * essentially unusable. * * 32-bit SYSCALL saves RIP to RCX, clears RFLAGS.RF, then saves * RFLAGS to R11, then loads new SS, CS, and RIP from previously * programmed MSRs. RFLAGS gets masked by a value from another MSR * (so CLD and CLAC are not needed). SYSCALL does not save anything on * the stack and does not change RSP. * * Note: RFLAGS saving+masking-with-MSR happens only in Long mode * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it). * Don't get confused: RFLAGS saving+masking depends on Long Mode Active bit * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes * or target CS descriptor's L bit (SYSCALL does not read segment descriptors). * * Arguments: * eax system call number * ecx return address * ebx arg1 * ebp arg2 (note: not saved in the stack frame, should not be touched) * edx arg3 * esi arg4 * edi arg5 * esp user stack * 0(%esp) arg6 */ SYM_CODE_START(entry_SYSCALL_compat) UNWIND_HINT_ENTRY ENDBR /* Interrupts are off on entry. */ swapgs /* Stash user ESP */ movl %esp, %r8d /* Use %rsp as scratch reg. User ESP is stashed in r8 */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp /* Switch to the kernel stack */ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL) ANNOTATE_NOENDBR /* Construct struct pt_regs on stack */ pushq $__USER32_DS /* pt_regs->ss */ pushq %r8 /* pt_regs->sp */ pushq %r11 /* pt_regs->flags */ pushq $__USER32_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL) movl %eax, %eax /* discard orig_ax high bits */ pushq %rax /* pt_regs->orig_ax */ PUSH_AND_CLEAR_REGS rcx=%rbp rax=$-ENOSYS UNWIND_HINT_REGS IBRS_ENTER UNTRAIN_RET movq %rsp, %rdi call do_fast_syscall_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \ "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV /* Opportunistic SYSRET */ sysret32_from_system_call: /* * We are not going to return to userspace from the trampoline * stack. 
So let's erase the thread stack right now. */ STACKLEAK_ERASE IBRS_EXIT movq RBX(%rsp), %rbx /* pt_regs->rbx */ movq RBP(%rsp), %rbp /* pt_regs->rbp */ movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */ movq RIP(%rsp), %rcx /* pt_regs->ip (in rcx) */ addq $RAX, %rsp /* Skip r8-r15 */ popq %rax /* pt_regs->rax */ popq %rdx /* Skip pt_regs->cx */ popq %rdx /* pt_regs->dx */ popq %rsi /* pt_regs->si */ popq %rdi /* pt_regs->di */ /* * USERGS_SYSRET32 does: * GSBASE = user's GS base * EIP = ECX * RFLAGS = R11 * CS = __USER32_CS * SS = __USER_DS * * ECX will not match pt_regs->cx, but we're returning to a vDSO * trampoline that will fix up RCX, so this is okay. * * R12-R15 are callee-saved, so they contain whatever was in them * when the system call started, which is already known to user * code. We zero R8-R10 to avoid info leaks. */ movq RSP-ORIG_RAX(%rsp), %rsp SYM_INNER_LABEL(entry_SYSRETL_compat_unsafe_stack, SYM_L_GLOBAL) ANNOTATE_NOENDBR /* * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored * on the process stack which is not mapped to userspace and * not readable after we SWITCH_TO_USER_CR3. Delay the CR3 * switch until after after the last reference to the process * stack. * * %r8/%r9 are zeroed before the sysret, thus safe to clobber. */ SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d swapgs sysretl SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL) ANNOTATE_NOENDBR int3 SYM_CODE_END(entry_SYSCALL_compat) /* * 32-bit legacy system call entry. * * 32-bit x86 Linux system calls traditionally used the INT $0x80 * instruction. INT $0x80 lands here. * * This entry point can be used by 32-bit and 64-bit programs to perform * 32-bit system calls. Instances of INT $0x80 can be found inline in * various programs and libraries. It is also used by the vDSO's * __kernel_vsyscall fallback for hardware that doesn't support a faster * entry method. Restarted 32-bit system calls also fall back to INT * $0x80 regardless of what instruction was originally used to do the * system call. * * This is considered a slow path. It is not used by most libc * implementations on modern hardware except during process startup. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp arg6 */ SYM_CODE_START(entry_INT80_compat) UNWIND_HINT_ENTRY ENDBR /* * Interrupts are off on entry. */ ASM_CLAC /* Do this early to minimize exposure */ ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV /* * User tracing code (ptrace or signal handlers) might assume that * the saved RAX contains a 32-bit number when we're invoking a 32-bit * syscall. Just in case the high bits are nonzero, zero-extend * the syscall number. (This could almost certainly be deleted * with no ill effects.) */ movl %eax, %eax /* switch to thread stack expects orig_ax and rdi to be pushed */ pushq %rax /* pt_regs->orig_ax */ /* Need to switch before accessing the thread stack. */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax /* In the Xen PV case we already run on the thread stack. 
*/ ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV movq %rsp, %rax movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp pushq 5*8(%rax) /* regs->ss */ pushq 4*8(%rax) /* regs->rsp */ pushq 3*8(%rax) /* regs->eflags */ pushq 2*8(%rax) /* regs->cs */ pushq 1*8(%rax) /* regs->ip */ pushq 0*8(%rax) /* regs->orig_ax */ .Lint80_keep_stack: PUSH_AND_CLEAR_REGS rax=$-ENOSYS UNWIND_HINT_REGS cld IBRS_ENTER UNTRAIN_RET movq %rsp, %rdi call do_int80_syscall_32 jmp swapgs_restore_regs_and_return_to_usermode SYM_CODE_END(entry_INT80_compat)
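The INT $0x80 path above is reachable from ordinary user code with the register convention described in the comment (eax holds the syscall number, ebx/ecx/edx/esi/edi/ebp the arguments). A small illustrative program exercising it with the i386 getpid number (20), which takes no arguments, is sketched below; the file name and build line are arbitrary, and a 32-bit build (gcc -m32) is assumed, although a no-argument call like this also reaches the compat entry from a 64-bit process on kernels with 32-bit emulation.

/* Illustrative only: invoke the legacy INT $0x80 entry from user space. */
#include <stdio.h>

int main(void)
{
        long pid;

        asm volatile("int $0x80"
                     : "=a" (pid)        /* result comes back in eax      */
                     : "a" (20L)         /* eax: i386 __NR_getpid         */
                     : "memory");
        printf("getpid() via int $0x80 -> %ld\n", pid);
        return 0;
}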
aixcc-public/challenge-001-exemplar-source
1,095
arch/x86/entry/thunk_64.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Save registers before calling assembly functions. This avoids * disturbance of register allocation in some inline assembly constructs. * Copyright 2001,2002 by Andi Kleen, SuSE Labs. */ #include <linux/linkage.h> #include "calling.h" #include <asm/asm.h> #include <asm/export.h> /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ .macro THUNK name, func SYM_FUNC_START_NOALIGN(\name) pushq %rbp movq %rsp, %rbp pushq %rdi pushq %rsi pushq %rdx pushq %rcx pushq %rax pushq %r8 pushq %r9 pushq %r10 pushq %r11 call \func jmp __thunk_restore SYM_FUNC_END(\name) _ASM_NOKPROBE(\name) .endm THUNK preempt_schedule_thunk, preempt_schedule THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace EXPORT_SYMBOL(preempt_schedule_thunk) EXPORT_SYMBOL(preempt_schedule_notrace_thunk) SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore) popq %r11 popq %r10 popq %r9 popq %r8 popq %rax popq %rcx popq %rdx popq %rsi popq %rdi popq %rbp RET _ASM_NOKPROBE(__thunk_restore) SYM_CODE_END(__thunk_restore)
aixcc-public/challenge-001-exemplar-source
4,482
arch/x86/mm/mem_encrypt_boot.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * AMD Memory Encryption Support * * Copyright (C) 2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> */ #include <linux/linkage.h> #include <linux/pgtable.h> #include <asm/page.h> #include <asm/processor-flags.h> #include <asm/msr-index.h> #include <asm/nospec-branch.h> .text .code64 SYM_FUNC_START(sme_encrypt_execute) /* * Entry parameters: * RDI - virtual address for the encrypted mapping * RSI - virtual address for the decrypted mapping * RDX - length to encrypt * RCX - virtual address of the encryption workarea, including: * - stack page (PAGE_SIZE) * - encryption routine page (PAGE_SIZE) * - intermediate copy buffer (PMD_PAGE_SIZE) * R8 - physical address of the pagetables to use for encryption */ push %rbp movq %rsp, %rbp /* RBP now has original stack pointer */ /* Set up a one page stack in the non-encrypted memory area */ movq %rcx, %rax /* Workarea stack page */ leaq PAGE_SIZE(%rax), %rsp /* Set new stack pointer */ addq $PAGE_SIZE, %rax /* Workarea encryption routine */ push %r12 movq %rdi, %r10 /* Encrypted area */ movq %rsi, %r11 /* Decrypted area */ movq %rdx, %r12 /* Area length */ /* Copy encryption routine into the workarea */ movq %rax, %rdi /* Workarea encryption routine */ leaq __enc_copy(%rip), %rsi /* Encryption routine */ movq $(.L__enc_copy_end - __enc_copy), %rcx /* Encryption routine length */ rep movsb /* Setup registers for call */ movq %r10, %rdi /* Encrypted area */ movq %r11, %rsi /* Decrypted area */ movq %r8, %rdx /* Pagetables used for encryption */ movq %r12, %rcx /* Area length */ movq %rax, %r8 /* Workarea encryption routine */ addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ ANNOTATE_RETPOLINE_SAFE call *%rax /* Call the encryption routine */ pop %r12 movq %rbp, %rsp /* Restore original stack pointer */ pop %rbp /* Offset to __x86_return_thunk would be wrong here */ ANNOTATE_UNRET_SAFE ret int3 SYM_FUNC_END(sme_encrypt_execute) SYM_FUNC_START(__enc_copy) /* * Routine used to encrypt memory in place. * This routine must be run outside of the kernel proper since * the kernel will be encrypted during the process. So this * routine is defined here and then copied to an area outside * of the kernel where it will remain and run decrypted * during execution. * * On entry the registers must be: * RDI - virtual address for the encrypted mapping * RSI - virtual address for the decrypted mapping * RDX - address of the pagetables to use for encryption * RCX - length of area * R8 - intermediate copy buffer * * RAX - points to this routine * * The area will be encrypted by copying from the non-encrypted * memory space to an intermediate buffer and then copying from the * intermediate buffer back to the encrypted memory space. The physical * addresses of the two mappings are the same which results in the area * being encrypted "in place". 
*/ /* Enable the new page tables */ mov %rdx, %cr3 /* Flush any global TLBs */ mov %cr4, %rdx andq $~X86_CR4_PGE, %rdx mov %rdx, %cr4 orq $X86_CR4_PGE, %rdx mov %rdx, %cr4 push %r15 push %r12 movq %rcx, %r9 /* Save area length */ movq %rdi, %r10 /* Save encrypted area address */ movq %rsi, %r11 /* Save decrypted area address */ /* Set the PAT register PA5 entry to write-protect */ movl $MSR_IA32_CR_PAT, %ecx rdmsr mov %rdx, %r15 /* Save original PAT value */ andl $0xffff00ff, %edx /* Clear PA5 */ orl $0x00000500, %edx /* Set PA5 to WP */ wrmsr wbinvd /* Invalidate any cache entries */ /* Copy/encrypt up to 2MB at a time */ movq $PMD_PAGE_SIZE, %r12 1: cmpq %r12, %r9 jnb 2f movq %r9, %r12 2: movq %r11, %rsi /* Source - decrypted area */ movq %r8, %rdi /* Dest - intermediate copy buffer */ movq %r12, %rcx rep movsb movq %r8, %rsi /* Source - intermediate copy buffer */ movq %r10, %rdi /* Dest - encrypted area */ movq %r12, %rcx rep movsb addq %r12, %r11 addq %r12, %r10 subq %r12, %r9 /* Kernel length decrement */ jnz 1b /* Kernel length not zero? */ /* Restore PAT register */ movl $MSR_IA32_CR_PAT, %ecx rdmsr mov %r15, %rdx /* Restore original PAT value */ wrmsr pop %r12 pop %r15 /* Offset to __x86_return_thunk would be wrong here */ ANNOTATE_UNRET_SAFE ret int3 .L__enc_copy_end: SYM_FUNC_END(__enc_copy)
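__enc_copy above encrypts in place by streaming data through an intermediate buffer in chunks of at most PMD_PAGE_SIZE, clamping the final chunk to the remaining length (the cmpq/jnb pair at label 1). The same clamped-chunk loop, reduced to plain C as a sketch (function name and the 2 MiB constant standing in for PMD_PAGE_SIZE are illustrative, and ordinary memory is used rather than encrypted/decrypted mappings), looks like:

/* Sketch of the chunked bounce-buffer copy performed by __enc_copy. */
#include <stddef.h>
#include <string.h>

#define CHUNK (2UL * 1024 * 1024)        /* stands in for PMD_PAGE_SIZE */

static void copy_via_bounce(void *dst, const void *src, size_t len, void *bounce)
{
        while (len) {
                size_t n = len < CHUNK ? len : CHUNK;   /* clamp last chunk */

                memcpy(bounce, src, n);                 /* decrypted -> buffer  */
                memcpy(dst, bounce, n);                 /* buffer -> encrypted  */
                src = (const char *)src + n;
                dst = (char *)dst + n;
                len -= n;
        }
}

In the real routine the two copies hit the same physical pages through differently mapped virtual addresses, which is what makes the data land encrypted.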
aixcc-public/challenge-001-exemplar-source
6,430
arch/x86/coco/tdx/tdcall.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/asm-offsets.h> #include <asm/asm.h> #include <asm/frame.h> #include <asm/unwind_hints.h> #include <linux/linkage.h> #include <linux/bits.h> #include <linux/errno.h> #include "../../virt/vmx/tdx/tdxcall.S" /* * Bitmasks of exposed registers (with VMM). */ #define TDX_R10 BIT(10) #define TDX_R11 BIT(11) #define TDX_R12 BIT(12) #define TDX_R13 BIT(13) #define TDX_R14 BIT(14) #define TDX_R15 BIT(15) /* * These registers are clobbered to hold arguments for each * TDVMCALL. They are safe to expose to the VMM. * Each bit in this mask represents a register ID. Bit field * details can be found in TDX GHCI specification, section * titled "TDCALL [TDG.VP.VMCALL] leaf". */ #define TDVMCALL_EXPOSE_REGS_MASK ( TDX_R10 | TDX_R11 | \ TDX_R12 | TDX_R13 | \ TDX_R14 | TDX_R15 ) /* * __tdx_module_call() - Used by TDX guests to request services from * the TDX module (does not include VMM services) using TDCALL instruction. * * Transforms function call register arguments into the TDCALL register ABI. * After TDCALL operation, TDX module output is saved in @out (if it is * provided by the user). * *------------------------------------------------------------------------- * TDCALL ABI: *------------------------------------------------------------------------- * Input Registers: * * RAX - TDCALL Leaf number. * RCX,RDX,R8-R9 - TDCALL Leaf specific input registers. * * Output Registers: * * RAX - TDCALL instruction error code. * RCX,RDX,R8-R11 - TDCALL Leaf specific output registers. * *------------------------------------------------------------------------- * * __tdx_module_call() function ABI: * * @fn (RDI) - TDCALL Leaf ID, moved to RAX * @rcx (RSI) - Input parameter 1, moved to RCX * @rdx (RDX) - Input parameter 2, moved to RDX * @r8 (RCX) - Input parameter 3, moved to R8 * @r9 (R8) - Input parameter 4, moved to R9 * * @out (R9) - struct tdx_module_output pointer * stored temporarily in R12 (not * shared with the TDX module). It * can be NULL. * * Return status of TDCALL via RAX. */ SYM_FUNC_START(__tdx_module_call) FRAME_BEGIN TDX_MODULE_CALL host=0 FRAME_END RET SYM_FUNC_END(__tdx_module_call) /* * __tdx_hypercall() - Make hypercalls to a TDX VMM using TDVMCALL leaf * of TDCALL instruction * * Transforms values in function call argument struct tdx_hypercall_args @args * into the TDCALL register ABI. After TDCALL operation, VMM output is saved * back in @args. * *------------------------------------------------------------------------- * TD VMCALL ABI: *------------------------------------------------------------------------- * * Input Registers: * * RAX - TDCALL instruction leaf number (0 - TDG.VP.VMCALL) * RCX - BITMAP which controls which part of TD Guest GPR * is passed as-is to the VMM and back. * R10 - Set 0 to indicate TDCALL follows standard TDX ABI * specification. Non zero value indicates vendor * specific ABI. * R11 - VMCALL sub function number * RBX, RBP, RDI, RSI - Used to pass VMCALL sub function specific arguments. * R8-R9, R12-R15 - Same as above. * * Output Registers: * * RAX - TDCALL instruction status (Not related to hypercall * output). * R10 - Hypercall output error code. * R11-R15 - Hypercall sub function specific output values. * *------------------------------------------------------------------------- * * __tdx_hypercall() function ABI: * * @args (RDI) - struct tdx_hypercall_args for input and output * @flags (RSI) - TDX_HCALL_* flags * * On successful completion, return the hypercall error code. 
*/ SYM_FUNC_START(__tdx_hypercall) FRAME_BEGIN /* Save callee-saved GPRs as mandated by the x86_64 ABI */ push %r15 push %r14 push %r13 push %r12 /* Mangle function call ABI into TDCALL ABI: */ /* Set TDCALL leaf ID (TDVMCALL (0)) in RAX */ xor %eax, %eax /* Copy hypercall registers from arg struct: */ movq TDX_HYPERCALL_r10(%rdi), %r10 movq TDX_HYPERCALL_r11(%rdi), %r11 movq TDX_HYPERCALL_r12(%rdi), %r12 movq TDX_HYPERCALL_r13(%rdi), %r13 movq TDX_HYPERCALL_r14(%rdi), %r14 movq TDX_HYPERCALL_r15(%rdi), %r15 movl $TDVMCALL_EXPOSE_REGS_MASK, %ecx /* * For the idle loop STI needs to be called directly before the TDCALL * that enters idle (EXIT_REASON_HLT case). STI instruction enables * interrupts only one instruction later. If there is a window between * STI and the instruction that emulates the HALT state, there is a * chance for interrupts to happen in this window, which can delay the * HLT operation indefinitely. Since this is the not the desired * result, conditionally call STI before TDCALL. */ testq $TDX_HCALL_ISSUE_STI, %rsi jz .Lskip_sti sti .Lskip_sti: tdcall /* * RAX==0 indicates a failure of the TDVMCALL mechanism itself and that * something has gone horribly wrong with the TDX module. * * The return status of the hypercall operation is in a separate * register (in R10). Hypercall errors are a part of normal operation * and are handled by callers. */ testq %rax, %rax jne .Lpanic /* TDVMCALL leaf return code is in R10 */ movq %r10, %rax /* Copy hypercall result registers to arg struct if needed */ testq $TDX_HCALL_HAS_OUTPUT, %rsi jz .Lout movq %r10, TDX_HYPERCALL_r10(%rdi) movq %r11, TDX_HYPERCALL_r11(%rdi) movq %r12, TDX_HYPERCALL_r12(%rdi) movq %r13, TDX_HYPERCALL_r13(%rdi) movq %r14, TDX_HYPERCALL_r14(%rdi) movq %r15, TDX_HYPERCALL_r15(%rdi) .Lout: /* * Zero out registers exposed to the VMM to avoid speculative execution * with VMM-controlled values. This needs to include all registers * present in TDVMCALL_EXPOSE_REGS_MASK (except R12-R15). R12-R15 * context will be restored. */ xor %r10d, %r10d xor %r11d, %r11d /* Restore callee-saved GPRs as mandated by the x86_64 ABI */ pop %r12 pop %r13 pop %r14 pop %r15 FRAME_END RET .Lpanic: call __tdx_hypercall_failed /* __tdx_hypercall_failed never returns */ REACHABLE jmp .Lpanic SYM_FUNC_END(__tdx_hypercall)
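TDVMCALL_EXPOSE_REGS_MASK above is simply a bitmap with one bit per general-purpose register that may be exposed to the VMM, where the bit index equals the register number. A few lines of C, illustrative only, reproduce the arithmetic and show the resulting mask value:

/* Sketch: rebuild the register-exposure bitmap loaded into RCX above. */
#include <stdio.h>

#define BIT(n)  (1UL << (n))

int main(void)
{
        unsigned long mask = BIT(10) | BIT(11) | BIT(12) |
                             BIT(13) | BIT(14) | BIT(15);

        printf("TDVMCALL_EXPOSE_REGS_MASK = %#lx\n", mask);  /* prints 0xfc00 */
        return 0;
}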
aixcc-public/challenge-001-exemplar-source
1,624
arch/x86/um/vdso/vdso-layout.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Linker script for vDSO. This is an ELF shared object prelinked to * its virtual address, and with only one read-only segment. * This script controls its layout. */ SECTIONS { . = VDSO_PRELINK + SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .dynamic : { *(.dynamic) } :text :dynamic .rodata : { *(.rodata*) } :text .data : { *(.data*) *(.sdata*) *(.got.plt) *(.got) *(.gnu.linkonce.d.*) *(.bss*) *(.dynbss*) *(.gnu.linkonce.b.*) } .altinstructions : { *(.altinstructions) } .altinstr_replacement : { *(.altinstr_replacement) } /* * Align the actual code well away from the non-instruction data. * This is the best thing for the I-cache. */ . = ALIGN(0x100); .text : { *(.text*) } :text =0x90909090 } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; }
aixcc-public/challenge-001-exemplar-source
6,265
arch/x86/realmode/rm/trampoline_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Trampoline.S Derived from Setup.S by Linus Torvalds * * 4 Jan 1997 Michael Chastain: changed to gnu as. * 15 Sept 2005 Eric Biederman: 64bit PIC support * * Entry: CS:IP point to the start of our code, we are * in real mode with no stack, but the rest of the * trampoline page to make our stack and everything else * is a mystery. * * On entry to trampoline_start, the processor is in real mode * with 16-bit addressing and 16-bit data. CS has some value * and IP is zero. Thus, data addresses need to be absolute * (no relocation) and are taken with regard to r_base. * * With the addition of trampoline_level4_pgt this code can * now enter a 64bit kernel that lives at arbitrary 64bit * physical addresses. * * If you work on this file, check the object module with objdump * --full-contents --reloc to make sure there are no relocation * entries. */ #include <linux/linkage.h> #include <asm/pgtable_types.h> #include <asm/page_types.h> #include <asm/msr.h> #include <asm/segment.h> #include <asm/processor-flags.h> #include <asm/realmode.h> #include "realmode.h" .text .code16 .balign PAGE_SIZE SYM_CODE_START(trampoline_start) cli # We should be safe anyway wbinvd LJMPW_RM(1f) 1: mov %cs, %ax # Code and data in the same place mov %ax, %ds mov %ax, %es mov %ax, %ss # Setup stack movl $rm_stack_end, %esp call verify_cpu # Verify the cpu supports long mode testl %eax, %eax # Check for return code jnz no_longmode .Lswitch_to_protected: /* * GDT tables in non default location kernel can be beyond 16MB and * lgdt will not be able to load the address as in real mode default * operand size is 16bit. Use lgdtl instead to force operand size * to 32 bit. */ lidtl tr_idt # load idt with 0, 0 lgdtl tr_gdt # load gdt with whatever is appropriate movw $__KERNEL_DS, %dx # Data segment descriptor # Enable protected mode movl $(CR0_STATE & ~X86_CR0_PG), %eax movl %eax, %cr0 # into protected mode # flush prefetch and jump to startup_32 ljmpl $__KERNEL32_CS, $pa_startup_32 no_longmode: hlt jmp no_longmode SYM_CODE_END(trampoline_start) #ifdef CONFIG_AMD_MEM_ENCRYPT /* SEV-ES supports non-zero IP for entry points - no alignment needed */ SYM_CODE_START(sev_es_trampoline_start) cli # We should be safe anyway LJMPW_RM(1f) 1: mov %cs, %ax # Code and data in the same place mov %ax, %ds mov %ax, %es mov %ax, %ss # Setup stack movl $rm_stack_end, %esp jmp .Lswitch_to_protected SYM_CODE_END(sev_es_trampoline_start) #endif /* CONFIG_AMD_MEM_ENCRYPT */ #include "../kernel/verify_cpu.S" .section ".text32","ax" .code32 .balign 4 SYM_CODE_START(startup_32) movl %edx, %ss addl $pa_real_mode_base, %esp movl %edx, %ds movl %edx, %es movl %edx, %fs movl %edx, %gs /* * Check for memory encryption support. This is a safety net in * case BIOS hasn't done the necessary step of setting the bit in * the MSR for this AP. If SME is active and we've gotten this far * then it is safe for us to set the MSR bit and continue. If we * don't we'll eventually crash trying to execute encrypted * instructions. */ btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags jnc .Ldone movl $MSR_AMD64_SYSCFG, %ecx rdmsr bts $MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax jc .Ldone /* * Memory encryption is enabled but the SME enable bit for this * CPU has has not been set. It is safe to set it, so do so. 
*/ wrmsr .Ldone: movl pa_tr_cr4, %eax movl %eax, %cr4 # Enable PAE mode # Setup trampoline 4 level pagetables movl $pa_trampoline_pgd, %eax movl %eax, %cr3 # Set up EFER movl $MSR_EFER, %ecx rdmsr /* * Skip writing to EFER if the register already has desired * value (to avoid #VE for the TDX guest). */ cmp pa_tr_efer, %eax jne .Lwrite_efer cmp pa_tr_efer + 4, %edx je .Ldone_efer .Lwrite_efer: movl pa_tr_efer, %eax movl pa_tr_efer + 4, %edx wrmsr .Ldone_efer: # Enable paging and in turn activate Long Mode. movl $CR0_STATE, %eax movl %eax, %cr0 /* * At this point we're in long mode but in 32bit compatibility mode * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use * the new gdt/idt that has __KERNEL_CS with CS.L = 1. */ ljmpl $__KERNEL_CS, $pa_startup_64 SYM_CODE_END(startup_32) SYM_CODE_START(pa_trampoline_compat) /* * In compatibility mode. Prep ESP and DX for startup_32, then disable * paging and complete the switch to legacy 32-bit mode. */ movl $rm_stack_end, %esp movw $__KERNEL_DS, %dx movl $(CR0_STATE & ~X86_CR0_PG), %eax movl %eax, %cr0 ljmpl $__KERNEL32_CS, $pa_startup_32 SYM_CODE_END(pa_trampoline_compat) .section ".text64","ax" .code64 .balign 4 SYM_CODE_START(startup_64) # Now jump into the kernel using virtual addresses jmpq *tr_start(%rip) SYM_CODE_END(startup_64) SYM_CODE_START(trampoline_start64) /* * APs start here on a direct transfer from 64-bit BIOS with identity * mapped page tables. Load the kernel's GDT in order to gear down to * 32-bit mode (to handle 4-level vs. 5-level paging), and to (re)load * segment registers. Load the zero IDT so any fault triggers a * shutdown instead of jumping back into BIOS. */ lidt tr_idt(%rip) lgdt tr_gdt64(%rip) ljmpl *tr_compat(%rip) SYM_CODE_END(trampoline_start64) .section ".rodata","a" # Duplicate the global descriptor table # so the kernel can live anywhere .balign 16 SYM_DATA_START(tr_gdt) .short tr_gdt_end - tr_gdt - 1 # gdt limit .long pa_tr_gdt .short 0 .quad 0x00cf9b000000ffff # __KERNEL32_CS .quad 0x00af9b000000ffff # __KERNEL_CS .quad 0x00cf93000000ffff # __KERNEL_DS SYM_DATA_END_LABEL(tr_gdt, SYM_L_LOCAL, tr_gdt_end) SYM_DATA_START(tr_gdt64) .short tr_gdt_end - tr_gdt - 1 # gdt limit .long pa_tr_gdt .long 0 SYM_DATA_END(tr_gdt64) SYM_DATA_START(tr_compat) .long pa_trampoline_compat .short __KERNEL32_CS SYM_DATA_END(tr_compat) .bss .balign PAGE_SIZE SYM_DATA(trampoline_pgd, .space PAGE_SIZE) .balign 8 SYM_DATA_START(trampoline_header) SYM_DATA_LOCAL(tr_start, .space 8) SYM_DATA(tr_efer, .space 8) SYM_DATA(tr_cr4, .space 4) SYM_DATA(tr_flags, .space 4) SYM_DATA_END(trampoline_header) #include "trampoline_common.S"
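The raw .quad descriptors in tr_gdt above pack base, limit and attribute bits into 8 bytes in the usual scattered x86 segment-descriptor layout. A small decoder, illustrative only, pulls the pieces back apart; feeding it 0x00cf9b000000ffff (the __KERNEL32_CS entry above) yields base 0, a 20-bit limit of 0xfffff with 4 KiB granularity, and access byte 0x9b.

/* Sketch: split a flat segment descriptor into its fields. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t d = 0x00cf9b000000ffffULL;             /* __KERNEL32_CS above */

        uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 32) & 0xf0000);
        uint32_t base   = (uint32_t)((d >> 16) & 0xffffff) |
                          (uint32_t)(((d >> 56) & 0xff) << 24);
        unsigned access = (unsigned)((d >> 40) & 0xff);
        unsigned flags  = (unsigned)((d >> 52) & 0x0f);  /* G, D/B, L, AVL */

        printf("base=%#x limit=%#x access=%#x flags=%#x\n",
               base, limit, access, flags);
        return 0;
}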
aixcc-public/challenge-001-exemplar-source
1,035
arch/x86/realmode/rm/header.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Real-mode blob header; this should match realmode.h and be * readonly; for mutable data instead add pointers into the .data * or .bss sections as appropriate. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/segment.h> #include "realmode.h" .section ".header", "a" .balign 16 SYM_DATA_START(real_mode_header) .long pa_text_start .long pa_ro_end /* SMP trampoline */ .long pa_trampoline_start .long pa_trampoline_header #ifdef CONFIG_AMD_MEM_ENCRYPT .long pa_sev_es_trampoline_start #endif #ifdef CONFIG_X86_64 .long pa_trampoline_start64 .long pa_trampoline_pgd; #endif /* ACPI S3 wakeup */ #ifdef CONFIG_ACPI_SLEEP .long pa_wakeup_start .long pa_wakeup_header #endif /* APM/BIOS reboot */ .long pa_machine_real_restart_asm #ifdef CONFIG_X86_64 .long __KERNEL32_CS #endif SYM_DATA_END(real_mode_header) /* End signature, used to verify integrity */ .section ".signature","a" .balign 4 SYM_DATA(end_signature, .long REALMODE_END_SIGNATURE)
aixcc-public/challenge-001-exemplar-source
1,919
arch/x86/realmode/rm/trampoline_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Trampoline.S Derived from Setup.S by Linus Torvalds * * 4 Jan 1997 Michael Chastain: changed to gnu as. * * This is only used for booting secondary CPUs in SMP machine * * Entry: CS:IP point to the start of our code, we are * in real mode with no stack, but the rest of the * trampoline page to make our stack and everything else * is a mystery. * * We jump into arch/x86/kernel/head_32.S. * * On entry to trampoline_start, the processor is in real mode * with 16-bit addressing and 16-bit data. CS has some value * and IP is zero. Thus, we load CS to the physical segment * of the real mode code before doing anything further. */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include "realmode.h" .text .code16 .balign PAGE_SIZE SYM_CODE_START(trampoline_start) wbinvd # Needed for NUMA-Q should be harmless for others LJMPW_RM(1f) 1: mov %cs, %ax # Code and data in the same place mov %ax, %ds cli # We should be safe anyway movl tr_start, %eax # where we need to go /* * GDT tables in non default location kernel can be beyond 16MB and * lgdt will not be able to load the address as in real mode default * operand size is 16bit. Use lgdtl instead to force operand size * to 32 bit. */ lidtl tr_idt # load idt with 0, 0 lgdtl tr_gdt # load gdt with whatever is appropriate movw $1, %dx # protected mode (PE) bit lmsw %dx # into protected mode ljmpl $__BOOT_CS, $pa_startup_32 SYM_CODE_END(trampoline_start) .section ".text32","ax" .code32 SYM_CODE_START(startup_32) # note: also used from wakeup_asm.S jmp *%eax SYM_CODE_END(startup_32) .bss .balign 8 SYM_DATA_START(trampoline_header) SYM_DATA_LOCAL(tr_start, .space 4) SYM_DATA_LOCAL(tr_gdt_pad, .space 2) SYM_DATA_LOCAL(tr_gdt, .space 6) SYM_DATA_END(trampoline_header) #include "trampoline_common.S"
aixcc-public/challenge-001-exemplar-source
3,864
arch/x86/realmode/rm/wakeup_asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * ACPI wakeup real mode startup stub */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/msr-index.h> #include <asm/page_types.h> #include <asm/pgtable_types.h> #include <asm/processor-flags.h> #include "realmode.h" #include "wakeup.h" .code16 /* This should match the structure in wakeup.h */ .section ".data", "aw" .balign 16 SYM_DATA_START(wakeup_header) video_mode: .short 0 /* Video mode number */ pmode_entry: .long 0 pmode_cs: .short __KERNEL_CS pmode_cr0: .long 0 /* Saved %cr0 */ pmode_cr3: .long 0 /* Saved %cr3 */ pmode_cr4: .long 0 /* Saved %cr4 */ pmode_efer: .quad 0 /* Saved EFER */ pmode_gdt: .quad 0 pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */ pmode_behavior: .long 0 /* Wakeup behavior flags */ realmode_flags: .long 0 real_magic: .long 0 signature: .long WAKEUP_HEADER_SIGNATURE SYM_DATA_END(wakeup_header) .text .code16 .balign 16 SYM_CODE_START(wakeup_start) cli cld LJMPW_RM(3f) 3: /* Apparently some dimwit BIOS programmers don't know how to program a PM to RM transition, and we might end up here with junk in the data segment descriptor registers. The only way to repair that is to go into PM and fix it ourselves... */ movw $16, %cx lgdtl %cs:wakeup_gdt movl %cr0, %eax orb $X86_CR0_PE, %al movl %eax, %cr0 ljmpw $8, $2f 2: movw %cx, %ds movw %cx, %es movw %cx, %ss movw %cx, %fs movw %cx, %gs andb $~X86_CR0_PE, %al movl %eax, %cr0 LJMPW_RM(3f) 3: /* Set up segments */ movw %cs, %ax movw %ax, %ss movl $rm_stack_end, %esp movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs lidtl .Lwakeup_idt /* Clear the EFLAGS */ pushl $0 popfl /* Check header signature... */ movl signature, %eax cmpl $WAKEUP_HEADER_SIGNATURE, %eax jne bogus_real_magic /* Check we really have everything... */ movl end_signature, %eax cmpl $REALMODE_END_SIGNATURE, %eax jne bogus_real_magic /* Call the C code */ calll main /* Restore MISC_ENABLE before entering protected mode, in case BIOS decided to clear XD_DISABLE during S3. */ movl pmode_behavior, %edi btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %edi jnc 1f movl pmode_misc_en, %eax movl pmode_misc_en + 4, %edx movl $MSR_IA32_MISC_ENABLE, %ecx wrmsr 1: /* Do any other stuff... */ #ifndef CONFIG_64BIT /* This could also be done in C code... */ movl pmode_cr3, %eax movl %eax, %cr3 btl $WAKEUP_BEHAVIOR_RESTORE_CR4, %edi jnc 1f movl pmode_cr4, %eax movl %eax, %cr4 1: btl $WAKEUP_BEHAVIOR_RESTORE_EFER, %edi jnc 1f movl pmode_efer, %eax movl pmode_efer + 4, %edx movl $MSR_EFER, %ecx wrmsr 1: lgdtl pmode_gdt /* This really couldn't... */ movl pmode_entry, %eax movl pmode_cr0, %ecx movl %ecx, %cr0 ljmpl $__KERNEL_CS, $pa_startup_32 /* -> jmp *%eax in trampoline_32.S */ #else jmp trampoline_start #endif SYM_CODE_END(wakeup_start) bogus_real_magic: 1: hlt jmp 1b .section ".rodata","a" /* * Set up the wakeup GDT. We set these up as Big Real Mode, * that is, with limits set to 4 GB. At least the Lenovo * Thinkpad X61 is known to need this for the video BIOS * initialization quirk to work; this is likely to also * be the case for other laptops or integrated video devices. 
*/ .balign 16 SYM_DATA_START(wakeup_gdt) .word 3*8-1 /* Self-descriptor */ .long pa_wakeup_gdt .word 0 .word 0xffff /* 16-bit code segment @ real_mode_base */ .long 0x9b000000 + pa_real_mode_base .word 0x008f /* big real mode */ .word 0xffff /* 16-bit data segment @ real_mode_base */ .long 0x93000000 + pa_real_mode_base .word 0x008f /* big real mode */ SYM_DATA_END(wakeup_gdt) .section ".rodata","a" .balign 8 /* This is the standard real-mode IDT */ .balign 16 SYM_DATA_START_LOCAL(.Lwakeup_idt) .word 0xffff /* limit */ .long 0 /* address */ .word 0 SYM_DATA_END(.Lwakeup_idt)
aixcc-public/challenge-001-exemplar-source
4,290
arch/x86/realmode/rm/reboot.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/processor-flags.h> #include <asm/msr-index.h> #include "realmode.h" /* * The following code and data reboots the machine by switching to real * mode and jumping to the BIOS reset entry point, as if the CPU has * really been reset. The previous version asked the keyboard * controller to pulse the CPU reset line, which is more thorough, but * doesn't work with at least one type of 486 motherboard. It is easy * to stop this code working; hence the copious comments. * * This code is called with the restart type (0 = BIOS, 1 = APM) in * the primary argument register (%eax for 32 bit, %edi for 64 bit). */ .section ".text32", "ax" .code32 SYM_CODE_START(machine_real_restart_asm) #ifdef CONFIG_X86_64 /* Switch to trampoline GDT as it is guaranteed < 4 GiB */ movl $__KERNEL_DS, %eax movl %eax, %ds lgdtl pa_tr_gdt /* Disable paging to drop us out of long mode */ movl %cr0, %eax andl $~X86_CR0_PG, %eax movl %eax, %cr0 ljmpl $__KERNEL32_CS, $pa_machine_real_restart_paging_off SYM_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL) xorl %eax, %eax xorl %edx, %edx movl $MSR_EFER, %ecx wrmsr movl %edi, %eax #endif /* CONFIG_X86_64 */ /* Set up the IDT for real mode. */ lidtl pa_machine_real_restart_idt /* * Set up a GDT from which we can load segment descriptors for real * mode. The GDT is not used in real mode; it is just needed here to * prepare the descriptors. */ lgdtl pa_machine_real_restart_gdt /* * Load the data segment registers with 16-bit compatible values */ movl $16, %ecx movl %ecx, %ds movl %ecx, %es movl %ecx, %fs movl %ecx, %gs movl %ecx, %ss ljmpw $8, $1f SYM_CODE_END(machine_real_restart_asm) /* * This is 16-bit protected mode code to disable paging and the cache, * switch to real mode and jump to the BIOS reset code. * * The instruction that switches to real mode by writing to CR0 must be * followed immediately by a far jump instruction, which set CS to a * valid value for real mode, and flushes the prefetch queue to avoid * running instructions that have already been decoded in protected * mode. * * Clears all the flags except ET, especially PG (paging), PE * (protected-mode enable) and TS (task switch for coprocessor state * save). Flushes the TLB after paging has been disabled. Sets CD and * NW, to disable the cache on a 486, and invalidates the cache. This * is more like the state of a 486 after reset. I don't know if * something else should be done for other chips. * * More could be done here to set up the registers as if a CPU reset had * occurred; hopefully real BIOSs don't assume much. This is not the * actual BIOS entry point, anyway (that is at 0xfffffff0). * * Most of this work is probably excessive, but it is what is tested. */ .text .code16 .balign 16 machine_real_restart_asm16: 1: xorl %ecx, %ecx movl %cr0, %edx andl $0x00000011, %edx orl $0x60000000, %edx movl %edx, %cr0 movl %ecx, %cr3 movl %cr0, %edx testl $0x60000000, %edx /* If no cache bits -> no wbinvd */ jz 2f wbinvd 2: andb $0x10, %dl movl %edx, %cr0 LJMPW_RM(3f) 3: andw %ax, %ax jz bios apm: movw $0x1000, %ax movw %ax, %ss movw $0xf000, %sp movw $0x5307, %ax movw $0x0001, %bx movw $0x0003, %cx int $0x15 /* This should never return... 
*/ bios: ljmpw $0xf000, $0xfff0 .section ".rodata", "a" .balign 16 SYM_DATA_START(machine_real_restart_idt) .word 0xffff /* Length - real mode default value */ .long 0 /* Base - real mode default value */ SYM_DATA_END(machine_real_restart_idt) .balign 16 SYM_DATA_START(machine_real_restart_gdt) /* Self-pointer */ .word 0xffff /* Length - real mode default value */ .long pa_machine_real_restart_gdt .word 0 /* * 16-bit code segment pointing to real_mode_seg * Selector value 8 */ .word 0xffff /* Limit */ .long 0x9b000000 + pa_real_mode_base .word 0 /* * 16-bit data segment with the selector value 16 = 0x10 and * base value 0x100; since this is consistent with real mode * semantics we don't have to reload the segments once CR0.PE = 0. */ .quad GDT_ENTRY(0x0093, 0x100, 0xffff) SYM_DATA_END(machine_real_restart_gdt)
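The 16-bit stub above masks CR0 with 0x00000011 and then ORs in 0x60000000; written with the architectural CR0 bit names those constants are PE|ET kept and CD|NW set, which is exactly what the surrounding comment describes. A short sketch, with the bit positions spelled out, makes the constants readable:

/* Sketch: name the CR0 bits behind the reboot stub's magic constants. */
#include <stdio.h>

#define X86_CR0_PE (1u << 0)    /* protection enable  */
#define X86_CR0_ET (1u << 4)    /* extension type     */
#define X86_CR0_NW (1u << 29)   /* not write-through  */
#define X86_CR0_CD (1u << 30)   /* cache disable      */

int main(void)
{
        printf("keep mask %#x, set mask %#x\n",
               X86_CR0_PE | X86_CR0_ET,         /* 0x00000011 */
               X86_CR0_CD | X86_CR0_NW);        /* 0x60000000 */
        return 0;
}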
aixcc-public/challenge-001-exemplar-source
2,250
arch/x86/platform/olpc/xo1-wakeup.S
/* SPDX-License-Identifier: GPL-2.0 */ .text #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable_32.h> .macro writepost,value movb $0x34, %al outb %al, $0x70 movb $\value, %al outb %al, $0x71 .endm wakeup_start: # OFW lands us here, running in protected mode, with a # kernel-compatible GDT already setup. # Clear any dangerous flags pushl $0 popfl writepost 0x31 # Set up %cr3 movl $initial_page_table - __PAGE_OFFSET, %eax movl %eax, %cr3 movl saved_cr4, %eax movl %eax, %cr4 movl saved_cr0, %eax movl %eax, %cr0 # Control registers were modified, pipeline resync is needed jmp 1f 1: movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs lgdt saved_gdt lidt saved_idt lldt saved_ldt ljmp $(__KERNEL_CS),$1f 1: movl %cr3, %eax movl %eax, %cr3 wbinvd # Go back to the return point jmp ret_point save_registers: sgdt saved_gdt sidt saved_idt sldt saved_ldt pushl %edx movl %cr4, %edx movl %edx, saved_cr4 movl %cr0, %edx movl %edx, saved_cr0 popl %edx movl %ebx, saved_context_ebx movl %ebp, saved_context_ebp movl %esi, saved_context_esi movl %edi, saved_context_edi pushfl popl saved_context_eflags RET restore_registers: movl saved_context_ebp, %ebp movl saved_context_ebx, %ebx movl saved_context_esi, %esi movl saved_context_edi, %edi pushl saved_context_eflags popfl RET SYM_CODE_START(do_olpc_suspend_lowlevel) call save_processor_state call save_registers # This is the stack context we want to remember movl %esp, saved_context_esp pushl $3 call xo1_do_sleep jmp wakeup_start .p2align 4,,7 ret_point: movl saved_context_esp, %esp writepost 0x32 call restore_registers call restore_processor_state RET SYM_CODE_END(do_olpc_suspend_lowlevel) .data saved_gdt: .long 0,0 saved_idt: .long 0,0 saved_ldt: .long 0 saved_cr4: .long 0 saved_cr0: .long 0 saved_context_esp: .long 0 saved_context_edi: .long 0 saved_context_esi: .long 0 saved_context_ebx: .long 0 saved_context_ebp: .long 0 saved_context_eflags: .long 0
aixcc-public/challenge-001-exemplar-source
1,178
arch/x86/platform/efi/efi_stub_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * EFI call stub for IA32. * * This stub allows us to make EFI calls in physical mode with interrupts * turned off. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/asm-offsets.h> #include <asm/page_types.h> __INIT SYM_FUNC_START(efi_call_svam) push %ebp movl %esp, %ebp push %ebx push 16(%esp) push 16(%esp) push %ecx push %edx movl %eax, %ebx // &systab_phys->runtime /* * Switch to the flat mapped alias of this routine, by jumping to the * address of label '1' after subtracting PAGE_OFFSET from it. */ movl $1f, %edx subl $__PAGE_OFFSET, %edx jmp *%edx 1: /* disable paging */ movl %cr0, %edx andl $0x7fffffff, %edx movl %edx, %cr0 /* convert the stack pointer to a flat mapped address */ subl $__PAGE_OFFSET, %esp /* call the EFI routine */ movl (%eax), %eax call *EFI_svam(%eax) /* grab the virtually remapped EFI runtime services table pointer */ movl (%ebx), %ecx movl 36(%esp), %edx // &efi.runtime movl %ecx, (%edx) /* re-enable paging */ movl %cr0, %edx orl $0x80000000, %edx movl %edx, %cr0 movl 16(%esp), %ebx leave RET SYM_FUNC_END(efi_call_svam)
aixcc-public/challenge-001-exemplar-source
2,480
arch/x86/platform/efi/efi_thunk_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Intel Corporation; author Matt Fleming * * Support for invoking 32-bit EFI runtime services from a 64-bit * kernel. * * The below thunking functions are only used after ExitBootServices() * has been called. This simplifies things considerably as compared with * the early EFI thunking because we can leave all the kernel state * intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime * services from __KERNEL32_CS. This means we can continue to service * interrupts across an EFI mixed mode call. * * We do however, need to handle the fact that we're running in a full * 64-bit virtual address space. Things like the stack and instruction * addresses need to be accessible by the 32-bit firmware, so we rely on * using the identity mappings in the EFI page table to access the stack * and kernel text (see efi_setup_page_tables()). */ #include <linux/linkage.h> #include <linux/objtool.h> #include <asm/page_types.h> #include <asm/segment.h> .text .code64 SYM_FUNC_START(__efi64_thunk) STACK_FRAME_NON_STANDARD __efi64_thunk push %rbp push %rbx /* * Switch to 1:1 mapped 32-bit stack pointer. */ movq %rsp, %rax movq efi_mixed_mode_stack_pa(%rip), %rsp push %rax /* * Copy args passed via the stack */ subq $0x24, %rsp movq 0x18(%rax), %rbp movq 0x20(%rax), %rbx movq 0x28(%rax), %rax movl %ebp, 0x18(%rsp) movl %ebx, 0x1c(%rsp) movl %eax, 0x20(%rsp) /* * Calculate the physical address of the kernel text. */ movq $__START_KERNEL_map, %rax subq phys_base(%rip), %rax leaq 1f(%rip), %rbp leaq 2f(%rip), %rbx subq %rax, %rbp subq %rax, %rbx movl %ebx, 0x0(%rsp) /* return address */ movl %esi, 0x4(%rsp) movl %edx, 0x8(%rsp) movl %ecx, 0xc(%rsp) movl %r8d, 0x10(%rsp) movl %r9d, 0x14(%rsp) /* Switch to 32-bit descriptor */ pushq $__KERNEL32_CS pushq %rdi /* EFI runtime service address */ lretq // This return instruction is not needed for correctness, as it will // never be reached. It only exists to make objtool happy, which will // otherwise complain about unreachable instructions in the callers. RET SYM_FUNC_END(__efi64_thunk) .section ".rodata", "a", @progbits .balign 16 SYM_DATA_START(__efi64_thunk_ret_tramp) 1: movq 0x20(%rsp), %rsp pop %rbx pop %rbp ret int3 .code32 2: pushl $__KERNEL_CS pushl %ebp lret SYM_DATA_END(__efi64_thunk_ret_tramp) .bss .balign 8 SYM_DATA(efi_mixed_mode_stack_pa, .quad 0)
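__efi64_thunk above converts the addresses of labels 1 and 2 to their identity-mapped physical aliases by subtracting (__START_KERNEL_map - phys_base), which rearranges to the familiar phys = virt - __START_KERNEL_map + phys_base. A couple of lines of C with hypothetical values (only the 0xffffffff80000000 base is the typical x86-64 constant; phys_base and the label address are made up) show the same arithmetic:

/* Sketch with made-up values: mirror the virt->phys math in __efi64_thunk. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t start_kernel_map = 0xffffffff80000000ULL; /* typical value */
        const uint64_t phys_base        = 0x0000000001000000ULL; /* hypothetical  */
        const uint64_t label_virt       = 0xffffffff81234560ULL; /* hypothetical  */

        uint64_t delta      = start_kernel_map - phys_base;
        uint64_t label_phys = label_virt - delta;

        printf("phys alias = %#llx\n", (unsigned long long)label_phys); /* 0x2234560 */
        return 0;
}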
aixcc-public/challenge-001-exemplar-source
3,978
arch/x86/platform/pvh/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright C 2016, Oracle and/or its affiliates. All rights reserved. */ .code32 .text #define _pa(x) ((x) - __START_KERNEL_map) #include <linux/elfnote.h> #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/asm.h> #include <asm/boot.h> #include <asm/processor-flags.h> #include <asm/msr.h> #include <asm/nospec-branch.h> #include <xen/interface/elfnote.h> __HEAD /* * Entry point for PVH guests. * * Xen ABI specifies the following register state when we come here: * * - `ebx`: contains the physical memory address where the loader has placed * the boot start info structure. * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared. * - `cr4`: all bits are cleared. * - `cs `: must be a 32-bit read/execute code segment with a base of `0` * and a limit of `0xFFFFFFFF`. The selector value is unspecified. * - `ds`, `es`: must be a 32-bit read/write data segment with a base of * `0` and a limit of `0xFFFFFFFF`. The selector values are all * unspecified. * - `tr`: must be a 32-bit TSS (active) with a base of '0' and a limit * of '0x67'. * - `eflags`: bit 17 (VM) must be cleared. Bit 9 (IF) must be cleared. * Bit 8 (TF) must be cleared. Other bits are all unspecified. * * All other processor registers and flag bits are unspecified. The OS is in * charge of setting up it's own stack, GDT and IDT. */ #define PVH_GDT_ENTRY_CS 1 #define PVH_GDT_ENTRY_DS 2 #define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8) #define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8) SYM_CODE_START_LOCAL(pvh_start_xen) UNWIND_HINT_EMPTY cld lgdt (_pa(gdt)) mov $PVH_DS_SEL,%eax mov %eax,%ds mov %eax,%es mov %eax,%ss /* Stash hvm_start_info. */ mov $_pa(pvh_start_info), %edi mov %ebx, %esi mov _pa(pvh_start_info_sz), %ecx shr $2,%ecx rep movsl mov $_pa(early_stack_end), %esp /* Enable PAE mode. */ mov %cr4, %eax orl $X86_CR4_PAE, %eax mov %eax, %cr4 #ifdef CONFIG_X86_64 /* Enable Long mode. */ mov $MSR_EFER, %ecx rdmsr btsl $_EFER_LME, %eax wrmsr /* Enable pre-constructed page tables. */ mov $_pa(init_top_pgt), %eax mov %eax, %cr3 mov $(X86_CR0_PG | X86_CR0_PE), %eax mov %eax, %cr0 /* Jump to 64-bit mode. */ ljmp $PVH_CS_SEL, $_pa(1f) /* 64-bit entry point. */ .code64 1: /* Set base address in stack canary descriptor. */ mov $MSR_GS_BASE,%ecx mov $_pa(canary), %eax xor %edx, %edx wrmsr call xen_prepare_pvh /* startup_64 expects boot_params in %rsi. */ mov $_pa(pvh_bootparams), %rsi mov $_pa(startup_64), %rax ANNOTATE_RETPOLINE_SAFE jmp *%rax #else /* CONFIG_X86_64 */ call mk_early_pgtbl_32 mov $_pa(initial_page_table), %eax mov %eax, %cr3 mov %cr0, %eax or $(X86_CR0_PG | X86_CR0_PE), %eax mov %eax, %cr0 ljmp $PVH_CS_SEL, $1f 1: call xen_prepare_pvh mov $_pa(pvh_bootparams), %esi /* startup_32 doesn't expect paging and PAE to be on. 
*/ ljmp $PVH_CS_SEL, $_pa(2f) 2: mov %cr0, %eax and $~X86_CR0_PG, %eax mov %eax, %cr0 mov %cr4, %eax and $~X86_CR4_PAE, %eax mov %eax, %cr4 ljmp $PVH_CS_SEL, $_pa(startup_32) #endif SYM_CODE_END(pvh_start_xen) .section ".init.data","aw" .balign 8 SYM_DATA_START_LOCAL(gdt) .word gdt_end - gdt_start .long _pa(gdt_start) .word 0 SYM_DATA_END(gdt) SYM_DATA_START_LOCAL(gdt_start) .quad 0x0000000000000000 /* NULL descriptor */ #ifdef CONFIG_X86_64 .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */ #else .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */ #endif .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */ SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end) .balign 16 SYM_DATA_LOCAL(canary, .fill 48, 1, 0) SYM_DATA_START_LOCAL(early_stack) .fill BOOT_STACK_SIZE, 1, 0 SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end) ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY, _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
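The gdt_start entries above are produced with the GDT_ENTRY(flags, base, limit) helper, which scatters the three values across the descriptor layout. A stand-alone reimplementation of that packing (a sketch, not the kernel macro itself) reproduces the 32-bit code-segment entry used here:

/* Sketch: pack (flags, base, limit) into a flat GDT descriptor. */
#include <stdint.h>
#include <stdio.h>

static uint64_t gdt_entry(uint32_t flags, uint32_t base, uint32_t limit)
{
        return  ((uint64_t)(base  & 0xff000000) << (56 - 24)) |
                ((uint64_t)(flags & 0x0000f0ff) << 40)        |
                ((uint64_t)(limit & 0x000f0000) << (48 - 16)) |
                ((uint64_t)(base  & 0x00ffffff) << 16)        |
                 (uint64_t)(limit & 0x0000ffff);
}

int main(void)
{
        /* 32-bit PVH_CS_SEL entry: expect 0x00cf9a000000ffff */
        printf("%#llx\n", (unsigned long long)gdt_entry(0xc09a, 0, 0xfffff));
        return 0;
}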
aixcc-public/challenge-001-exemplar-source
3,196
arch/x86/kernel/acpi/wakeup_64.S
/* SPDX-License-Identifier: GPL-2.0-only */ .text #include <linux/linkage.h> #include <linux/objtool.h> #include <asm/segment.h> #include <asm/pgtable_types.h> #include <asm/page_types.h> #include <asm/msr.h> #include <asm/asm-offsets.h> #include <asm/frame.h> #include <asm/nospec-branch.h> # Copyright 2003 Pavel Machek <pavel@suse.cz .code64 /* * Hooray, we are in Long 64-bit mode (but still running in low memory) */ SYM_FUNC_START(wakeup_long64) movq saved_magic, %rax movq $0x123456789abcdef0, %rdx cmpq %rdx, %rax je 2f /* stop here on a saved_magic mismatch */ movq $0xbad6d61676963, %rcx 1: jmp 1b 2: movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs movq saved_rsp, %rsp movq saved_rbx, %rbx movq saved_rdi, %rdi movq saved_rsi, %rsi movq saved_rbp, %rbp movq saved_rip, %rax ANNOTATE_RETPOLINE_SAFE jmp *%rax SYM_FUNC_END(wakeup_long64) SYM_FUNC_START(do_suspend_lowlevel) FRAME_BEGIN subq $8, %rsp xorl %eax, %eax call save_processor_state movq $saved_context, %rax movq %rsp, pt_regs_sp(%rax) movq %rbp, pt_regs_bp(%rax) movq %rsi, pt_regs_si(%rax) movq %rdi, pt_regs_di(%rax) movq %rbx, pt_regs_bx(%rax) movq %rcx, pt_regs_cx(%rax) movq %rdx, pt_regs_dx(%rax) movq %r8, pt_regs_r8(%rax) movq %r9, pt_regs_r9(%rax) movq %r10, pt_regs_r10(%rax) movq %r11, pt_regs_r11(%rax) movq %r12, pt_regs_r12(%rax) movq %r13, pt_regs_r13(%rax) movq %r14, pt_regs_r14(%rax) movq %r15, pt_regs_r15(%rax) pushfq popq pt_regs_flags(%rax) movq $.Lresume_point, saved_rip(%rip) movq %rsp, saved_rsp movq %rbp, saved_rbp movq %rbx, saved_rbx movq %rdi, saved_rdi movq %rsi, saved_rsi addq $8, %rsp movl $3, %edi xorl %eax, %eax call x86_acpi_enter_sleep_state /* in case something went wrong, restore the machine status and go on */ jmp .Lresume_point .align 4 .Lresume_point: /* We don't restore %rax, it must be 0 anyway */ movq $saved_context, %rax movq saved_context_cr4(%rax), %rbx movq %rbx, %cr4 movq saved_context_cr3(%rax), %rbx movq %rbx, %cr3 movq saved_context_cr2(%rax), %rbx movq %rbx, %cr2 movq saved_context_cr0(%rax), %rbx movq %rbx, %cr0 pushq pt_regs_flags(%rax) popfq movq pt_regs_sp(%rax), %rsp movq pt_regs_bp(%rax), %rbp movq pt_regs_si(%rax), %rsi movq pt_regs_di(%rax), %rdi movq pt_regs_bx(%rax), %rbx movq pt_regs_cx(%rax), %rcx movq pt_regs_dx(%rax), %rdx movq pt_regs_r8(%rax), %r8 movq pt_regs_r9(%rax), %r9 movq pt_regs_r10(%rax), %r10 movq pt_regs_r11(%rax), %r11 movq pt_regs_r12(%rax), %r12 movq pt_regs_r13(%rax), %r13 movq pt_regs_r14(%rax), %r14 movq pt_regs_r15(%rax), %r15 #if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) /* * The suspend path may have poisoned some areas deeper in the stack, * which we now need to unpoison. */ movq %rsp, %rdi call kasan_unpoison_task_stack_below #endif xorl %eax, %eax addq $8, %rsp FRAME_END jmp restore_processor_state SYM_FUNC_END(do_suspend_lowlevel) STACK_FRAME_NON_STANDARD do_suspend_lowlevel .data saved_rbp: .quad 0 saved_rsi: .quad 0 saved_rdi: .quad 0 saved_rbx: .quad 0 saved_rip: .quad 0 saved_rsp: .quad 0 SYM_DATA(saved_magic, .quad 0)
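The error marker loaded into RCX above on a saved_magic mismatch, 0xbad6d61676963, is not arbitrary: read as bytes it is 0x0bad followed by the ASCII string "magic", so a debugger stopping in the 1: spin loop immediately shows why the CPU is parked there. A few lines of C, illustrative only, make the encoding visible:

/* Sketch: show that the wakeup error marker spells out "bad" + "magic". */
#include <stdio.h>

int main(void)
{
        unsigned long long marker = 0xbad6d61676963ULL;

        /* low five bytes are ASCII, most significant byte first */
        for (int shift = 32; shift >= 0; shift -= 8)
                putchar((int)((marker >> shift) & 0xff));
        printf("  (top bits: %#llx)\n", marker >> 40);  /* prints "magic" and 0xbad */
        return 0;
}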
aixcc-public/challenge-001-exemplar-source
1,800
arch/x86/kernel/acpi/wakeup_32.S
/* SPDX-License-Identifier: GPL-2.0-only */ .text #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> # Copyright 2003, 2008 Pavel Machek <pavel@suse.cz .code32 ALIGN SYM_CODE_START(wakeup_pmode_return) movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %fs movw %ax, %gs movw $__USER_DS, %ax movw %ax, %ds movw %ax, %es # reload the gdt, as we need the full 32 bit address lidt saved_idt lldt saved_ldt ljmp $(__KERNEL_CS), $1f 1: movl %cr3, %eax movl %eax, %cr3 wbinvd # and restore the stack ... but you need gdt for this to work movl saved_context_esp, %esp movl %cs:saved_magic, %eax cmpl $0x12345678, %eax jne bogus_magic # jump to place where we left off movl saved_eip, %eax jmp *%eax SYM_CODE_END(wakeup_pmode_return) bogus_magic: jmp bogus_magic save_registers: sidt saved_idt sldt saved_ldt str saved_tss leal 4(%esp), %eax movl %eax, saved_context_esp movl %ebx, saved_context_ebx movl %ebp, saved_context_ebp movl %esi, saved_context_esi movl %edi, saved_context_edi pushfl popl saved_context_eflags movl $ret_point, saved_eip RET restore_registers: movl saved_context_ebp, %ebp movl saved_context_ebx, %ebx movl saved_context_esi, %esi movl saved_context_edi, %edi pushl saved_context_eflags popfl RET SYM_CODE_START(do_suspend_lowlevel) call save_processor_state call save_registers pushl $3 call x86_acpi_enter_sleep_state addl $4, %esp # In case of S3 failure, we'll emerge here. Jump # to ret_point to recover jmp ret_point .p2align 4,,7 ret_point: call restore_registers call restore_processor_state RET SYM_CODE_END(do_suspend_lowlevel) .data ALIGN SYM_DATA(saved_magic, .long 0) saved_eip: .long 0 # saved registers saved_idt: .long 0,0 saved_ldt: .long 0 saved_tss: .long 0
aixcc-public/challenge-001-exemplar-source
9,454
arch/x86/kvm/svm/vmenter.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/asm.h> #include <asm/bitsperlong.h> #include <asm/kvm_vcpu_regs.h> #include <asm/nospec-branch.h> #include "kvm-asm-offsets.h" #define WORD_SIZE (BITS_PER_LONG / 8) /* Intentionally omit RAX as it's context switched by hardware */ #define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE) #define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE) #define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE) /* Intentionally omit RSP as it's context switched by hardware */ #define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE) #define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE) #define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE) #ifdef CONFIG_X86_64 #define VCPU_R8 (SVM_vcpu_arch_regs + __VCPU_REGS_R8 * WORD_SIZE) #define VCPU_R9 (SVM_vcpu_arch_regs + __VCPU_REGS_R9 * WORD_SIZE) #define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE) #define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE) #define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE) #define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE) #define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE) #define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE) #endif #define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa) .section .noinstr.text, "ax" .macro RESTORE_GUEST_SPEC_CTRL /* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */ ALTERNATIVE_2 "", \ "jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \ "", X86_FEATURE_V_SPEC_CTRL 801: .endm .macro RESTORE_GUEST_SPEC_CTRL_BODY 800: /* * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the * host's, write the MSR. This is kept out-of-line so that the common * case does not have to jump. * * IMPORTANT: To avoid RSB underflow attacks and any other nastiness, * there must not be any returns or indirect branches between this code * and vmentry. */ movl SVM_spec_ctrl(%_ASM_DI), %eax cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax je 801b mov $MSR_IA32_SPEC_CTRL, %ecx xor %edx, %edx wrmsr jmp 801b .endm .macro RESTORE_HOST_SPEC_CTRL /* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */ ALTERNATIVE_2 "", \ "jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \ "", X86_FEATURE_V_SPEC_CTRL 901: .endm .macro RESTORE_HOST_SPEC_CTRL_BODY 900: /* Same for after vmexit. */ mov $MSR_IA32_SPEC_CTRL, %ecx /* * Load the value that the guest had written into MSR_IA32_SPEC_CTRL, * if it was not intercepted during guest execution. */ cmpb $0, (%_ASM_SP) jnz 998f rdmsr movl %eax, SVM_spec_ctrl(%_ASM_DI) 998: /* Now restore the host value of the MSR if different from the guest's. */ movl PER_CPU_VAR(x86_spec_ctrl_current), %eax cmp SVM_spec_ctrl(%_ASM_DI), %eax je 901b xor %edx, %edx wrmsr jmp 901b .endm /** * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode * @svm: struct vcpu_svm * * @spec_ctrl_intercepted: bool */ SYM_FUNC_START(__svm_vcpu_run) push %_ASM_BP #ifdef CONFIG_X86_64 push %r15 push %r14 push %r13 push %r12 #else push %edi push %esi #endif push %_ASM_BX /* * Save variables needed after vmexit on the stack, in inverse * order compared to when they are needed. */ /* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */ push %_ASM_ARG2 /* Needed to restore access to percpu variables. */ __ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa) /* Finally save @svm. */ push %_ASM_ARG1 .ifnc _ASM_ARG1, _ASM_DI /* * Stash @svm in RDI early. 
On 32-bit, arguments are in RAX, RCX * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL. */ mov %_ASM_ARG1, %_ASM_DI .endif /* Clobbers RAX, RCX, RDX. */ RESTORE_GUEST_SPEC_CTRL /* * Use a single vmcb (vmcb01 because it's always valid) for * context switching guest state via VMLOAD/VMSAVE, that way * the state doesn't need to be copied between vmcb01 and * vmcb02 when switching vmcbs for nested virtualization. */ mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX 1: vmload %_ASM_AX 2: /* Get svm->current_vmcb->pa into RAX. */ mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX /* Load guest registers. */ mov VCPU_RCX(%_ASM_DI), %_ASM_CX mov VCPU_RDX(%_ASM_DI), %_ASM_DX mov VCPU_RBX(%_ASM_DI), %_ASM_BX mov VCPU_RBP(%_ASM_DI), %_ASM_BP mov VCPU_RSI(%_ASM_DI), %_ASM_SI #ifdef CONFIG_X86_64 mov VCPU_R8 (%_ASM_DI), %r8 mov VCPU_R9 (%_ASM_DI), %r9 mov VCPU_R10(%_ASM_DI), %r10 mov VCPU_R11(%_ASM_DI), %r11 mov VCPU_R12(%_ASM_DI), %r12 mov VCPU_R13(%_ASM_DI), %r13 mov VCPU_R14(%_ASM_DI), %r14 mov VCPU_R15(%_ASM_DI), %r15 #endif mov VCPU_RDI(%_ASM_DI), %_ASM_DI /* Enter guest mode */ sti 3: vmrun %_ASM_AX 4: cli /* Pop @svm to RAX while it's the only available register. */ pop %_ASM_AX /* Save all guest registers. */ mov %_ASM_CX, VCPU_RCX(%_ASM_AX) mov %_ASM_DX, VCPU_RDX(%_ASM_AX) mov %_ASM_BX, VCPU_RBX(%_ASM_AX) mov %_ASM_BP, VCPU_RBP(%_ASM_AX) mov %_ASM_SI, VCPU_RSI(%_ASM_AX) mov %_ASM_DI, VCPU_RDI(%_ASM_AX) #ifdef CONFIG_X86_64 mov %r8, VCPU_R8 (%_ASM_AX) mov %r9, VCPU_R9 (%_ASM_AX) mov %r10, VCPU_R10(%_ASM_AX) mov %r11, VCPU_R11(%_ASM_AX) mov %r12, VCPU_R12(%_ASM_AX) mov %r13, VCPU_R13(%_ASM_AX) mov %r14, VCPU_R14(%_ASM_AX) mov %r15, VCPU_R15(%_ASM_AX) #endif /* @svm can stay in RDI from now on. */ mov %_ASM_AX, %_ASM_DI mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX 5: vmsave %_ASM_AX 6: /* Restores GSBASE among other things, allowing access to percpu data. */ pop %_ASM_AX 7: vmload %_ASM_AX 8: #ifdef CONFIG_RETPOLINE /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE #endif /* Clobbers RAX, RCX, RDX. */ RESTORE_HOST_SPEC_CTRL /* * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be * untrained as soon as we exit the VM and are back to the * kernel. This should be done before re-enabling interrupts * because interrupt handlers won't sanitize 'ret' if the return is * from the kernel. */ UNTRAIN_RET /* SRSO */ ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT /* * Clear all general purpose registers except RSP and RAX to prevent * speculative use of the guest's values, even those that are reloaded * via the stack. In theory, an L1 cache miss when restoring registers * could lead to speculative execution with the guest's values. * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially * free. RSP and RAX are exempt as they are restored by hardware * during VM-Exit. */ xor %ecx, %ecx xor %edx, %edx xor %ebx, %ebx xor %ebp, %ebp xor %esi, %esi xor %edi, %edi #ifdef CONFIG_X86_64 xor %r8d, %r8d xor %r9d, %r9d xor %r10d, %r10d xor %r11d, %r11d xor %r12d, %r12d xor %r13d, %r13d xor %r14d, %r14d xor %r15d, %r15d #endif /* "Pop" @spec_ctrl_intercepted. 
*/ pop %_ASM_BX pop %_ASM_BX #ifdef CONFIG_X86_64 pop %r12 pop %r13 pop %r14 pop %r15 #else pop %esi pop %edi #endif pop %_ASM_BP RET RESTORE_GUEST_SPEC_CTRL_BODY RESTORE_HOST_SPEC_CTRL_BODY 10: cmpb $0, kvm_rebooting jne 2b ud2 30: cmpb $0, kvm_rebooting jne 4b ud2 50: cmpb $0, kvm_rebooting jne 6b ud2 70: cmpb $0, kvm_rebooting jne 8b ud2 _ASM_EXTABLE(1b, 10b) _ASM_EXTABLE(3b, 30b) _ASM_EXTABLE(5b, 50b) _ASM_EXTABLE(7b, 70b) SYM_FUNC_END(__svm_vcpu_run) /** * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode * @svm: struct vcpu_svm * * @spec_ctrl_intercepted: bool */ SYM_FUNC_START(__svm_sev_es_vcpu_run) push %_ASM_BP #ifdef CONFIG_X86_64 push %r15 push %r14 push %r13 push %r12 #else push %edi push %esi #endif push %_ASM_BX /* * Save variables needed after vmexit on the stack, in inverse * order compared to when they are needed. */ /* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */ push %_ASM_ARG2 /* Save @svm. */ push %_ASM_ARG1 .ifnc _ASM_ARG1, _ASM_DI /* * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL. */ mov %_ASM_ARG1, %_ASM_DI .endif /* Clobbers RAX, RCX, RDX. */ RESTORE_GUEST_SPEC_CTRL /* Get svm->current_vmcb->pa into RAX. */ mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX /* Enter guest mode */ sti 1: vmrun %_ASM_AX 2: cli /* Pop @svm to RDI, guest registers have been saved already. */ pop %_ASM_DI #ifdef CONFIG_RETPOLINE /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE #endif /* Clobbers RAX, RCX, RDX. */ RESTORE_HOST_SPEC_CTRL /* * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be * untrained as soon as we exit the VM and are back to the * kernel. This should be done before re-enabling interrupts * because interrupt handlers won't sanitize RET if the return is * from the kernel. */ UNTRAIN_RET /* "Pop" @spec_ctrl_intercepted. */ pop %_ASM_BX pop %_ASM_BX #ifdef CONFIG_X86_64 pop %r12 pop %r13 pop %r14 pop %r15 #else pop %esi pop %edi #endif pop %_ASM_BP RET RESTORE_GUEST_SPEC_CTRL_BODY RESTORE_HOST_SPEC_CTRL_BODY 3: cmpb $0, kvm_rebooting jne 2b ud2 _ASM_EXTABLE(1b, 3b) SYM_FUNC_END(__svm_sev_es_vcpu_run)
aixcc-public/challenge-001-exemplar-source
8,884
arch/x86/kvm/vmx/vmenter.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/asm.h> #include <asm/bitsperlong.h> #include <asm/kvm_vcpu_regs.h> #include <asm/nospec-branch.h> #include <asm/percpu.h> #include <asm/segment.h> #include "kvm-asm-offsets.h" #include "run_flags.h" #define WORD_SIZE (BITS_PER_LONG / 8) #define VCPU_RAX __VCPU_REGS_RAX * WORD_SIZE #define VCPU_RCX __VCPU_REGS_RCX * WORD_SIZE #define VCPU_RDX __VCPU_REGS_RDX * WORD_SIZE #define VCPU_RBX __VCPU_REGS_RBX * WORD_SIZE /* Intentionally omit RSP as it's context switched by hardware */ #define VCPU_RBP __VCPU_REGS_RBP * WORD_SIZE #define VCPU_RSI __VCPU_REGS_RSI * WORD_SIZE #define VCPU_RDI __VCPU_REGS_RDI * WORD_SIZE #ifdef CONFIG_X86_64 #define VCPU_R8 __VCPU_REGS_R8 * WORD_SIZE #define VCPU_R9 __VCPU_REGS_R9 * WORD_SIZE #define VCPU_R10 __VCPU_REGS_R10 * WORD_SIZE #define VCPU_R11 __VCPU_REGS_R11 * WORD_SIZE #define VCPU_R12 __VCPU_REGS_R12 * WORD_SIZE #define VCPU_R13 __VCPU_REGS_R13 * WORD_SIZE #define VCPU_R14 __VCPU_REGS_R14 * WORD_SIZE #define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE #endif .section .noinstr.text, "ax" /** * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode * @vmx: struct vcpu_vmx * * @regs: unsigned long * (to guest registers) * @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH * VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl * * Returns: * 0 on VM-Exit, 1 on VM-Fail */ SYM_FUNC_START(__vmx_vcpu_run) push %_ASM_BP mov %_ASM_SP, %_ASM_BP #ifdef CONFIG_X86_64 push %r15 push %r14 push %r13 push %r12 #else push %edi push %esi #endif push %_ASM_BX /* Save @vmx for SPEC_CTRL handling */ push %_ASM_ARG1 /* Save @flags for SPEC_CTRL handling */ push %_ASM_ARG3 /* * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and * @regs is needed after VM-Exit to save the guest's register values. */ push %_ASM_ARG2 /* Copy @flags to BL, _ASM_ARG3 is volatile. */ mov %_ASM_ARG3B, %bl lea (%_ASM_SP), %_ASM_ARG2 call vmx_update_host_rsp ALTERNATIVE "jmp .Lspec_ctrl_done", "", X86_FEATURE_MSR_SPEC_CTRL /* * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the * host's, write the MSR. * * IMPORTANT: To avoid RSB underflow attacks and any other nastiness, * there must not be any returns or indirect branches between this code * and vmentry. */ mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI movl VMX_spec_ctrl(%_ASM_DI), %edi movl PER_CPU_VAR(x86_spec_ctrl_current), %esi cmp %edi, %esi je .Lspec_ctrl_done mov $MSR_IA32_SPEC_CTRL, %ecx xor %edx, %edx mov %edi, %eax wrmsr .Lspec_ctrl_done: /* * Since vmentry is serializing on affected CPUs, there's no need for * an LFENCE to stop speculation from skipping the wrmsr. */ /* Load @regs to RAX. */ mov (%_ASM_SP), %_ASM_AX /* Check if vmlaunch or vmresume is needed */ testb $VMX_RUN_VMRESUME, %bl /* Load guest registers. Don't clobber flags. */ mov VCPU_RCX(%_ASM_AX), %_ASM_CX mov VCPU_RDX(%_ASM_AX), %_ASM_DX mov VCPU_RBX(%_ASM_AX), %_ASM_BX mov VCPU_RBP(%_ASM_AX), %_ASM_BP mov VCPU_RSI(%_ASM_AX), %_ASM_SI mov VCPU_RDI(%_ASM_AX), %_ASM_DI #ifdef CONFIG_X86_64 mov VCPU_R8 (%_ASM_AX), %r8 mov VCPU_R9 (%_ASM_AX), %r9 mov VCPU_R10(%_ASM_AX), %r10 mov VCPU_R11(%_ASM_AX), %r11 mov VCPU_R12(%_ASM_AX), %r12 mov VCPU_R13(%_ASM_AX), %r13 mov VCPU_R14(%_ASM_AX), %r14 mov VCPU_R15(%_ASM_AX), %r15 #endif /* Load guest RAX. This kills the @regs pointer! 
*/ mov VCPU_RAX(%_ASM_AX), %_ASM_AX /* Check EFLAGS.ZF from 'testb' above */ jz .Lvmlaunch /* * After a successful VMRESUME/VMLAUNCH, control flow "magically" * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting. * So this isn't a typical function and objtool needs to be told to * save the unwind state here and restore it below. */ UNWIND_HINT_SAVE /* * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at * the 'vmx_vmexit' label below. */ .Lvmresume: vmresume jmp .Lvmfail .Lvmlaunch: vmlaunch jmp .Lvmfail _ASM_EXTABLE(.Lvmresume, .Lfixup) _ASM_EXTABLE(.Lvmlaunch, .Lfixup) SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL) /* Restore unwind state from before the VMRESUME/VMLAUNCH. */ UNWIND_HINT_RESTORE ENDBR /* Temporarily save guest's RAX. */ push %_ASM_AX /* Reload @regs to RAX. */ mov WORD_SIZE(%_ASM_SP), %_ASM_AX /* Save all guest registers, including RAX from the stack */ pop VCPU_RAX(%_ASM_AX) mov %_ASM_CX, VCPU_RCX(%_ASM_AX) mov %_ASM_DX, VCPU_RDX(%_ASM_AX) mov %_ASM_BX, VCPU_RBX(%_ASM_AX) mov %_ASM_BP, VCPU_RBP(%_ASM_AX) mov %_ASM_SI, VCPU_RSI(%_ASM_AX) mov %_ASM_DI, VCPU_RDI(%_ASM_AX) #ifdef CONFIG_X86_64 mov %r8, VCPU_R8 (%_ASM_AX) mov %r9, VCPU_R9 (%_ASM_AX) mov %r10, VCPU_R10(%_ASM_AX) mov %r11, VCPU_R11(%_ASM_AX) mov %r12, VCPU_R12(%_ASM_AX) mov %r13, VCPU_R13(%_ASM_AX) mov %r14, VCPU_R14(%_ASM_AX) mov %r15, VCPU_R15(%_ASM_AX) #endif /* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */ xor %ebx, %ebx .Lclear_regs: /* Discard @regs. The register is irrelevant, it just can't be RBX. */ pop %_ASM_AX /* * Clear all general purpose registers except RSP and RBX to prevent * speculative use of the guest's values, even those that are reloaded * via the stack. In theory, an L1 cache miss when restoring registers * could lead to speculative execution with the guest's values. * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially * free. RSP and RBX are exempt as RSP is restored by hardware during * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return * value. */ xor %eax, %eax xor %ecx, %ecx xor %edx, %edx xor %ebp, %ebp xor %esi, %esi xor %edi, %edi #ifdef CONFIG_X86_64 xor %r8d, %r8d xor %r9d, %r9d xor %r10d, %r10d xor %r11d, %r11d xor %r12d, %r12d xor %r13d, %r13d xor %r14d, %r14d xor %r15d, %r15d #endif /* * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before * the first unbalanced RET after vmexit! * * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB * entries and (in some cases) RSB underflow. * * eIBRS has its own protection against poisoned RSB, so it doesn't * need the RSB filling sequence. But it does need to be enabled, and a * single call to retire, before the first unbalanced RET. */ FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\ X86_FEATURE_RSB_VMEXIT_LITE pop %_ASM_ARG2 /* @flags */ pop %_ASM_ARG1 /* @vmx */ call vmx_spec_ctrl_restore_host /* Put return value in AX */ mov %_ASM_BX, %_ASM_AX pop %_ASM_BX #ifdef CONFIG_X86_64 pop %r12 pop %r13 pop %r14 pop %r15 #else pop %esi pop %edi #endif pop %_ASM_BP RET .Lfixup: cmpb $0, kvm_rebooting jne .Lvmfail ud2 .Lvmfail: /* VM-Fail: set return value to 1 */ mov $1, %_ASM_BX jmp .Lclear_regs SYM_FUNC_END(__vmx_vcpu_run) .section .text, "ax" /** * vmread_error_trampoline - Trampoline from inline asm to vmread_error() * @field: VMCS field encoding that failed * @fault: %true if the VMREAD faulted, %false if it failed * Save and restore volatile registers across a call to vmread_error(). 
Note, * all parameters are passed on the stack. */ SYM_FUNC_START(vmread_error_trampoline) push %_ASM_BP mov %_ASM_SP, %_ASM_BP push %_ASM_AX push %_ASM_CX push %_ASM_DX #ifdef CONFIG_X86_64 push %rdi push %rsi push %r8 push %r9 push %r10 push %r11 #endif /* Load @field and @fault to arg1 and arg2 respectively. */ mov 3*WORD_SIZE(%_ASM_BP), %_ASM_ARG2 mov 2*WORD_SIZE(%_ASM_BP), %_ASM_ARG1 call vmread_error /* Zero out @fault, which will be popped into the result register. */ _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP) #ifdef CONFIG_X86_64 pop %r11 pop %r10 pop %r9 pop %r8 pop %rsi pop %rdi #endif pop %_ASM_DX pop %_ASM_CX pop %_ASM_AX pop %_ASM_BP RET SYM_FUNC_END(vmread_error_trampoline) SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff) /* * Unconditionally create a stack frame, getting the correct RSP on the * stack (for x86-64) would take two instructions anyways, and RBP can * be used to restore RSP to make objtool happy (see below). */ push %_ASM_BP mov %_ASM_SP, %_ASM_BP #ifdef CONFIG_X86_64 /* * Align RSP to a 16-byte boundary (to emulate CPU behavior) before * creating the synthetic interrupt stack frame for the IRQ/NMI. */ and $-16, %rsp push $__KERNEL_DS push %rbp #endif pushf push $__KERNEL_CS CALL_NOSPEC _ASM_ARG1 /* * "Restore" RSP from RBP, even though IRET has already unwound RSP to * the correct value. objtool doesn't know the callee will IRET and, * without the explicit restore, thinks the stack is getting walloped. * Using an unwind hint is problematic due to x86-64's dynamic alignment. */ mov %_ASM_BP, %_ASM_SP pop %_ASM_BP RET SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)
aixcc-public/challenge-001-exemplar-source
5,674
arch/x86/boot/compressed/head_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/boot/head.S * * Copyright (C) 1991, 1992, 1993 Linus Torvalds */ /* * head.S contains the 32-bit startup code. * * NOTE!!! Startup happens at absolute address 0x00001000, which is also where * the page directory will exist. The startup code will be overwritten by * the page directory. [According to comments etc elsewhere on a compressed * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC] * * Page 0 is deliberately kept safe, since System Management Mode code in * laptops may need to access the BIOS data stored there. This is also * useful for future device drivers that either access the BIOS via VM86 * mode. */ /* * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 */ .text #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/boot.h> #include <asm/asm-offsets.h> #include <asm/bootparam.h> /* * These symbols needed to be marked as .hidden to prevent the BFD linker from * generating R_386_32 (rather than R_386_RELATIVE) relocations for them when * the 32-bit compressed kernel is linked as PIE. This is no longer necessary, * but it doesn't hurt to keep them .hidden. */ .hidden _bss .hidden _ebss .hidden _end __HEAD SYM_FUNC_START(startup_32) cld cli /* * Calculate the delta between where we were compiled to run * at and where we were actually loaded at. This can only be done * with a short local call on x86. Nothing else will tell us what * address we are running at. The reserved chunk of the real-mode * data at 0x1e4 (defined as a scratch field) are used as the stack * for this calculation. Only 4 bytes are needed. */ leal (BP_scratch+4)(%esi), %esp call 1f 1: popl %edx addl $_GLOBAL_OFFSET_TABLE_+(.-1b), %edx /* Load new GDT */ leal gdt@GOTOFF(%edx), %eax movl %eax, 2(%eax) lgdt (%eax) /* Load segment registers with our descriptors */ movl $__BOOT_DS, %eax movl %eax, %ds movl %eax, %es movl %eax, %fs movl %eax, %gs movl %eax, %ss /* * %edx contains the address we are loaded at by the boot loader (plus the * offset to the GOT). The below code calculates %ebx to be the address where * we should move the kernel image temporarily for safe in-place decompression * (again, plus the offset to the GOT). * * %ebp is calculated to be the address that the kernel will be decompressed to. */ #ifdef CONFIG_RELOCATABLE leal startup_32@GOTOFF(%edx), %ebx #ifdef CONFIG_EFI_STUB /* * If we were loaded via the EFI LoadImage service, startup_32() will be at an * offset to the start of the space allocated for the image. efi_pe_entry() will * set up image_offset to tell us where the image actually starts, so that we * can use the full available buffer. * image_offset = startup_32 - image_base * Otherwise image_offset will be zero and has no effect on the calculations. */ subl image_offset@GOTOFF(%edx), %ebx #endif movl BP_kernel_alignment(%esi), %eax decl %eax addl %eax, %ebx notl %eax andl %eax, %ebx cmpl $LOAD_PHYSICAL_ADDR, %ebx jae 1f #endif movl $LOAD_PHYSICAL_ADDR, %ebx 1: movl %ebx, %ebp // Save the output address for later /* Target address to relocate to for decompression */ addl BP_init_size(%esi), %ebx subl $_end@GOTOFF, %ebx /* Set up the stack */ leal boot_stack_end@GOTOFF(%ebx), %esp /* Zero EFLAGS */ pushl $0 popfl /* * Copy the compressed kernel to the end of our buffer * where decompression in place becomes safe. 
*/ pushl %esi leal (_bss@GOTOFF-4)(%edx), %esi leal (_bss@GOTOFF-4)(%ebx), %edi movl $(_bss - startup_32), %ecx shrl $2, %ecx std rep movsl cld popl %esi /* * The GDT may get overwritten either during the copy we just did or * during extract_kernel below. To avoid any issues, repoint the GDTR * to the new copy of the GDT. */ leal gdt@GOTOFF(%ebx), %eax movl %eax, 2(%eax) lgdt (%eax) /* * Jump to the relocated address. */ leal .Lrelocated@GOTOFF(%ebx), %eax jmp *%eax SYM_FUNC_END(startup_32) #ifdef CONFIG_EFI_STUB SYM_FUNC_START(efi32_stub_entry) add $0x4, %esp movl 8(%esp), %esi /* save boot_params pointer */ call efi_main /* efi_main returns the possibly relocated address of startup_32 */ jmp *%eax SYM_FUNC_END(efi32_stub_entry) SYM_FUNC_ALIAS(efi_stub_entry, efi32_stub_entry) #endif .text SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) /* * Clear BSS (stack is currently empty) */ xorl %eax, %eax leal _bss@GOTOFF(%ebx), %edi leal _ebss@GOTOFF(%ebx), %ecx subl %edi, %ecx shrl $2, %ecx rep stosl /* * Do the extraction, and jump to the new kernel.. */ /* push arguments for extract_kernel: */ pushl output_len@GOTOFF(%ebx) /* decompressed length, end of relocs */ pushl %ebp /* output address */ pushl input_len@GOTOFF(%ebx) /* input_len */ leal input_data@GOTOFF(%ebx), %eax pushl %eax /* input_data */ leal boot_heap@GOTOFF(%ebx), %eax pushl %eax /* heap area */ pushl %esi /* real mode pointer */ call extract_kernel /* returns kernel location in %eax */ addl $24, %esp /* * Jump to the extracted kernel. */ xorl %ebx, %ebx jmp *%eax SYM_FUNC_END(.Lrelocated) .data .balign 8 SYM_DATA_START_LOCAL(gdt) .word gdt_end - gdt - 1 .long 0 .word 0 .quad 0x0000000000000000 /* Reserved */ .quad 0x00cf9a000000ffff /* __KERNEL_CS */ .quad 0x00cf92000000ffff /* __KERNEL_DS */ SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end) #ifdef CONFIG_EFI_STUB SYM_DATA(image_offset, .long 0) #endif /* * Stack and heap for uncompression */ .bss .balign 4 boot_heap: .fill BOOT_HEAP_SIZE, 1, 0 boot_stack: .fill BOOT_STACK_SIZE, 1, 0 boot_stack_end:
aixcc-public/challenge-001-exemplar-source
3,622
arch/x86/boot/compressed/efi_thunk_64.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
 *
 * Early support for invoking 32-bit EFI services from a 64-bit kernel.
 *
 * Because this thunking occurs before ExitBootServices() we have to
 * restore the firmware's 32-bit GDT and IDT before we make EFI service
 * calls.
 *
 * On the plus side, we don't have to worry about mangling 64-bit
 * addresses into 32-bits because we're executing with an identity
 * mapped pagetable and haven't transitioned to 64-bit virtual addresses
 * yet.
 */

#include <linux/linkage.h>
#include <asm/msr.h>
#include <asm/page_types.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>

	.code64
	.text
SYM_FUNC_START(__efi64_thunk)
	push	%rbp
	push	%rbx

	movl	%ds, %eax
	push	%rax
	movl	%es, %eax
	push	%rax
	movl	%ss, %eax
	push	%rax

	/* Copy args passed on stack */
	movq	0x30(%rsp), %rbp
	movq	0x38(%rsp), %rbx
	movq	0x40(%rsp), %rax

	/*
	 * Convert x86-64 ABI params to i386 ABI
	 */
	subq	$64, %rsp
	movl	%esi, 0x0(%rsp)
	movl	%edx, 0x4(%rsp)
	movl	%ecx, 0x8(%rsp)
	movl	%r8d, 0xc(%rsp)
	movl	%r9d, 0x10(%rsp)
	movl	%ebp, 0x14(%rsp)
	movl	%ebx, 0x18(%rsp)
	movl	%eax, 0x1c(%rsp)

	leaq	0x20(%rsp), %rbx
	sgdt	(%rbx)

	addq	$16, %rbx
	sidt	(%rbx)

	leaq	1f(%rip), %rbp

	/*
	 * Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
	 * and IDT that was installed when the kernel started executing. The
	 * pointers were saved at the EFI stub entry point in head_64.S.
	 *
	 * Pass the saved DS selector to the 32-bit code, and use far return to
	 * restore the saved CS selector.
	 */
	leaq	efi32_boot_idt(%rip), %rax
	lidt	(%rax)
	leaq	efi32_boot_gdt(%rip), %rax
	lgdt	(%rax)

	movzwl	efi32_boot_ds(%rip), %edx
	movzwq	efi32_boot_cs(%rip), %rax
	pushq	%rax
	leaq	efi_enter32(%rip), %rax
	pushq	%rax
	lretq

1:	addq	$64, %rsp
	movq	%rdi, %rax

	pop	%rbx
	movl	%ebx, %ss
	pop	%rbx
	movl	%ebx, %es
	pop	%rbx
	movl	%ebx, %ds
	/* Clear out 32-bit selector from FS and GS */
	xorl	%ebx, %ebx
	movl	%ebx, %fs
	movl	%ebx, %gs

	/*
	 * Convert 32-bit status code into 64-bit.
	 */
	roll	$1, %eax
	rorq	$1, %rax

	pop	%rbx
	pop	%rbp
	RET
SYM_FUNC_END(__efi64_thunk)

	.code32
/*
 * EFI service pointer must be in %edi.
 *
 * The stack should represent the 32-bit calling convention.
 */
SYM_FUNC_START_LOCAL(efi_enter32)
	/* Load firmware selector into data and stack segment registers */
	movl	%edx, %ds
	movl	%edx, %es
	movl	%edx, %fs
	movl	%edx, %gs
	movl	%edx, %ss

	/* Reload pgtables */
	movl	%cr3, %eax
	movl	%eax, %cr3

	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	/* Disable long mode via EFER */
	movl	$MSR_EFER, %ecx
	rdmsr
	btrl	$_EFER_LME, %eax
	wrmsr

	call	*%edi

	/* We must preserve return value */
	movl	%eax, %edi

	/*
	 * Some firmware will return with interrupts enabled. Be sure to
	 * disable them before we switch GDTs and IDTs.
	 */
	cli

	lidtl	(%ebx)
	subl	$16, %ebx

	lgdtl	(%ebx)

	movl	%cr4, %eax
	btsl	$(X86_CR4_PAE_BIT), %eax
	movl	%eax, %cr4

	movl	%cr3, %eax
	movl	%eax, %cr3

	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	xorl	%eax, %eax
	lldt	%ax

	pushl	$__KERNEL_CS
	pushl	%ebp

	/* Enable paging */
	movl	%cr0, %eax
	btsl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	lret
SYM_FUNC_END(efi_enter32)

	.data
	.balign	8
SYM_DATA_START(efi32_boot_gdt)
	.word	0
	.quad	0
SYM_DATA_END(efi32_boot_gdt)

SYM_DATA_START(efi32_boot_idt)
	.word	0
	.quad	0
SYM_DATA_END(efi32_boot_idt)

SYM_DATA_START(efi32_boot_cs)
	.word	0
SYM_DATA_END(efi32_boot_cs)

SYM_DATA_START(efi32_boot_ds)
	.word	0
SYM_DATA_END(efi32_boot_ds)
aixcc-public/challenge-001-exemplar-source
4,039
arch/x86/boot/compressed/mem_encrypt.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * AMD Memory Encryption Support * * Copyright (C) 2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> */ #include <linux/linkage.h> #include <asm/processor-flags.h> #include <asm/msr.h> #include <asm/asm-offsets.h> .text .code32 SYM_FUNC_START(get_sev_encryption_bit) xor %eax, %eax #ifdef CONFIG_AMD_MEM_ENCRYPT push %ebx push %ecx push %edx movl $0x80000000, %eax /* CPUID to check the highest leaf */ cpuid cmpl $0x8000001f, %eax /* See if 0x8000001f is available */ jb .Lno_sev /* * Check for the SEV feature: * CPUID Fn8000_001F[EAX] - Bit 1 * CPUID Fn8000_001F[EBX] - Bits 5:0 * Pagetable bit position used to indicate encryption */ movl $0x8000001f, %eax cpuid bt $1, %eax /* Check if SEV is available */ jnc .Lno_sev movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */ rdmsr bt $MSR_AMD64_SEV_ENABLED_BIT, %eax /* Check if SEV is active */ jnc .Lno_sev movl %ebx, %eax andl $0x3f, %eax /* Return the encryption bit location */ jmp .Lsev_exit .Lno_sev: xor %eax, %eax .Lsev_exit: pop %edx pop %ecx pop %ebx #endif /* CONFIG_AMD_MEM_ENCRYPT */ RET SYM_FUNC_END(get_sev_encryption_bit) /** * sev_es_req_cpuid - Request a CPUID value from the Hypervisor using * the GHCB MSR protocol * * @%eax: Register to request (0=EAX, 1=EBX, 2=ECX, 3=EDX) * @%edx: CPUID Function * * Returns 0 in %eax on success, non-zero on failure * %edx returns CPUID value on success */ SYM_CODE_START_LOCAL(sev_es_req_cpuid) shll $30, %eax orl $0x00000004, %eax movl $MSR_AMD64_SEV_ES_GHCB, %ecx wrmsr rep; vmmcall # VMGEXIT rdmsr /* Check response */ movl %eax, %ecx andl $0x3ffff000, %ecx # Bits [12-29] MBZ jnz 2f /* Check return code */ andl $0xfff, %eax cmpl $5, %eax jne 2f /* All good - return success */ xorl %eax, %eax 1: RET 2: movl $-1, %eax jmp 1b SYM_CODE_END(sev_es_req_cpuid) SYM_CODE_START(startup32_vc_handler) pushl %eax pushl %ebx pushl %ecx pushl %edx /* Keep CPUID function in %ebx */ movl %eax, %ebx /* Check if error-code == SVM_EXIT_CPUID */ cmpl $0x72, 16(%esp) jne .Lfail movl $0, %eax # Request CPUID[fn].EAX movl %ebx, %edx # CPUID fn call sev_es_req_cpuid # Call helper testl %eax, %eax # Check return code jnz .Lfail movl %edx, 12(%esp) # Store result movl $1, %eax # Request CPUID[fn].EBX movl %ebx, %edx # CPUID fn call sev_es_req_cpuid # Call helper testl %eax, %eax # Check return code jnz .Lfail movl %edx, 8(%esp) # Store result movl $2, %eax # Request CPUID[fn].ECX movl %ebx, %edx # CPUID fn call sev_es_req_cpuid # Call helper testl %eax, %eax # Check return code jnz .Lfail movl %edx, 4(%esp) # Store result movl $3, %eax # Request CPUID[fn].EDX movl %ebx, %edx # CPUID fn call sev_es_req_cpuid # Call helper testl %eax, %eax # Check return code jnz .Lfail movl %edx, 0(%esp) # Store result /* * Sanity check CPUID results from the Hypervisor. See comment in * do_vc_no_ghcb() for more details on why this is necessary. 
*/ /* Fail if SEV leaf not available in CPUID[0x80000000].EAX */ cmpl $0x80000000, %ebx jne .Lcheck_sev cmpl $0x8000001f, 12(%esp) jb .Lfail jmp .Ldone .Lcheck_sev: /* Fail if SEV bit not set in CPUID[0x8000001f].EAX[1] */ cmpl $0x8000001f, %ebx jne .Ldone btl $1, 12(%esp) jnc .Lfail .Ldone: popl %edx popl %ecx popl %ebx popl %eax /* Remove error code */ addl $4, %esp /* Jump over CPUID instruction */ addl $2, (%esp) iret .Lfail: /* Send terminate request to Hypervisor */ movl $0x100, %eax xorl %edx, %edx movl $MSR_AMD64_SEV_ES_GHCB, %ecx wrmsr rep; vmmcall /* If request fails, go to hlt loop */ hlt jmp .Lfail SYM_CODE_END(startup32_vc_handler) .code64 #include "../../kernel/sev_verify_cbit.S" .data #ifdef CONFIG_AMD_MEM_ENCRYPT .balign 8 SYM_DATA(sme_me_mask, .quad 0) SYM_DATA(sev_status, .quad 0) SYM_DATA(sev_check_data, .quad 0) #endif
aixcc-public/challenge-001-exemplar-source
2,062
arch/x86/boot/compressed/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#undef i386

#include <asm/cache.h>
#include <asm/page_types.h>

#ifdef CONFIG_X86_64
OUTPUT_ARCH(i386:x86-64)
ENTRY(startup_64)
#else
OUTPUT_ARCH(i386)
ENTRY(startup_32)
#endif

SECTIONS
{
	/* Be careful parts of head_64.S assume startup_32 is at
	 * address 0.
	 */
	. = 0;
	.head.text : {
		_head = . ;
		HEAD_TEXT
		_ehead = . ;
	}
	.rodata..compressed : {
		*(.rodata..compressed)
	}
	.text :	{
		_text = .; 	/* Text */
		*(.text)
		*(.text.*)
		_etext = . ;
	}
	.rodata : {
		_rodata = . ;
		*(.rodata)	 /* read-only data */
		*(.rodata.*)
		_erodata = . ;
	}
	.data :	{
		_data = . ;
		*(.data)
		*(.data.*)
		*(.bss.efistub)
		_edata = . ;
	}
	. = ALIGN(L1_CACHE_BYTES);
	.bss : {
		_bss = . ;
		*(.bss)
		*(.bss.*)
		*(COMMON)
		. = ALIGN(8);	/* For convenience during zeroing */
		_ebss = .;
	}
#ifdef CONFIG_X86_64
	. = ALIGN(PAGE_SIZE);
	.pgtable : {
		_pgtable = . ;
		*(.pgtable)
		_epgtable = . ;
	}
#endif
	. = ALIGN(PAGE_SIZE);	/* keep ZO size page aligned */
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	DISCARDS
	/DISCARD/ : {
		*(.dynamic) *(.dynsym) *(.dynstr) *(.dynbss)
		*(.hash) *(.gnu.hash)
		*(.note.*)
	}

	.got.plt (INFO) : {
		*(.got.plt)
	}
	ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
	       SIZEOF(.got.plt) == 0x18,
#else
	       SIZEOF(.got.plt) == 0xc,
#endif
	       "Unexpected GOT/PLT entries detected!")

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.got : {
		*(.got)
	}
	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

	.plt : {
		*(.plt) *(.plt.*)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.rel.dyn : {
		*(.rel.*) *(.rel_*)
	}
	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}
aixcc-public/challenge-001-exemplar-source
26,880
arch/x86/boot/compressed/head_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/boot/head.S * * Copyright (C) 1991, 1992, 1993 Linus Torvalds */ /* * head.S contains the 32-bit startup code. * * NOTE!!! Startup happens at absolute address 0x00001000, which is also where * the page directory will exist. The startup code will be overwritten by * the page directory. [According to comments etc elsewhere on a compressed * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC] * * Page 0 is deliberately kept safe, since System Management Mode code in * laptops may need to access the BIOS data stored there. This is also * useful for future device drivers that either access the BIOS via VM86 * mode. */ /* * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 */ .code32 .text #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/boot.h> #include <asm/msr.h> #include <asm/processor-flags.h> #include <asm/asm-offsets.h> #include <asm/bootparam.h> #include <asm/desc_defs.h> #include <asm/trapnr.h> #include "pgtable.h" /* * Locally defined symbols should be marked hidden: */ .hidden _bss .hidden _ebss .hidden _end __HEAD /* * This macro gives the relative virtual address of X, i.e. the offset of X * from startup_32. This is the same as the link-time virtual address of X, * since startup_32 is at 0, but defining it this way tells the * assembler/linker that we do not want the actual run-time address of X. This * prevents the linker from trying to create unwanted run-time relocation * entries for the reference when the compressed kernel is linked as PIE. * * A reference X(%reg) will result in the link-time VA of X being stored with * the instruction, and a run-time R_X86_64_RELATIVE relocation entry that * adds the 64-bit base address where the kernel is loaded. * * Replacing it with (X-startup_32)(%reg) results in the offset being stored, * and no run-time relocation. * * The macro should be used as a displacement with a base register containing * the run-time address of startup_32 [i.e. rva(X)(%reg)], or as an immediate * [$ rva(X)]. * * This macro can only be used from within the .head.text section, since the * expression requires startup_32 to be in the same section as the code being * assembled. */ #define rva(X) ((X) - startup_32) .code32 SYM_FUNC_START(startup_32) /* * 32bit entry is 0 and it is ABI so immutable! * If we come here directly from a bootloader, * kernel(text+data+bss+brk) ramdisk, zero_page, command line * all need to be under the 4G limit. */ cld cli /* * Calculate the delta between where we were compiled to run * at and where we were actually loaded at. This can only be done * with a short local call on x86. Nothing else will tell us what * address we are running at. The reserved chunk of the real-mode * data at 0x1e4 (defined as a scratch field) are used as the stack * for this calculation. Only 4 bytes are needed. */ leal (BP_scratch+4)(%esi), %esp call 1f 1: popl %ebp subl $ rva(1b), %ebp /* Load new GDT with the 64bit segments using 32bit descriptor */ leal rva(gdt)(%ebp), %eax movl %eax, 2(%eax) lgdt (%eax) /* Load segment registers with our descriptors */ movl $__BOOT_DS, %eax movl %eax, %ds movl %eax, %es movl %eax, %fs movl %eax, %gs movl %eax, %ss /* Setup a stack and load CS from current GDT */ leal rva(boot_stack_end)(%ebp), %esp pushl $__KERNEL32_CS leal rva(1f)(%ebp), %eax pushl %eax lretl 1: /* Setup Exception handling for SEV-ES */ call startup32_load_idt /* Make sure cpu supports long mode. 
*/ call verify_cpu testl %eax, %eax jnz .Lno_longmode /* * Compute the delta between where we were compiled to run at * and where the code will actually run at. * * %ebp contains the address we are loaded at by the boot loader and %ebx * contains the address where we should move the kernel image temporarily * for safe in-place decompression. */ #ifdef CONFIG_RELOCATABLE movl %ebp, %ebx #ifdef CONFIG_EFI_STUB /* * If we were loaded via the EFI LoadImage service, startup_32 will be at an * offset to the start of the space allocated for the image. efi_pe_entry will * set up image_offset to tell us where the image actually starts, so that we * can use the full available buffer. * image_offset = startup_32 - image_base * Otherwise image_offset will be zero and has no effect on the calculations. */ subl rva(image_offset)(%ebp), %ebx #endif movl BP_kernel_alignment(%esi), %eax decl %eax addl %eax, %ebx notl %eax andl %eax, %ebx cmpl $LOAD_PHYSICAL_ADDR, %ebx jae 1f #endif movl $LOAD_PHYSICAL_ADDR, %ebx 1: /* Target address to relocate to for decompression */ addl BP_init_size(%esi), %ebx subl $ rva(_end), %ebx /* * Prepare for entering 64 bit mode */ /* Enable PAE mode */ movl %cr4, %eax orl $X86_CR4_PAE, %eax movl %eax, %cr4 /* * Build early 4G boot pagetable */ /* * If SEV is active then set the encryption mask in the page tables. * This will insure that when the kernel is copied and decompressed * it will be done so encrypted. */ call get_sev_encryption_bit xorl %edx, %edx #ifdef CONFIG_AMD_MEM_ENCRYPT testl %eax, %eax jz 1f subl $32, %eax /* Encryption bit is always above bit 31 */ bts %eax, %edx /* Set encryption mask for page tables */ /* * Set MSR_AMD64_SEV_ENABLED_BIT in sev_status so that * startup32_check_sev_cbit() will do a check. sev_enable() will * initialize sev_status with all the bits reported by * MSR_AMD_SEV_STATUS later, but only MSR_AMD64_SEV_ENABLED_BIT * needs to be set for now. */ movl $1, rva(sev_status)(%ebp) 1: #endif /* Initialize Page tables to 0 */ leal rva(pgtable)(%ebx), %edi xorl %eax, %eax movl $(BOOT_INIT_PGT_SIZE/4), %ecx rep stosl /* Build Level 4 */ leal rva(pgtable + 0)(%ebx), %edi leal 0x1007 (%edi), %eax movl %eax, 0(%edi) addl %edx, 4(%edi) /* Build Level 3 */ leal rva(pgtable + 0x1000)(%ebx), %edi leal 0x1007(%edi), %eax movl $4, %ecx 1: movl %eax, 0x00(%edi) addl %edx, 0x04(%edi) addl $0x00001000, %eax addl $8, %edi decl %ecx jnz 1b /* Build Level 2 */ leal rva(pgtable + 0x2000)(%ebx), %edi movl $0x00000183, %eax movl $2048, %ecx 1: movl %eax, 0(%edi) addl %edx, 4(%edi) addl $0x00200000, %eax addl $8, %edi decl %ecx jnz 1b /* Enable the boot page tables */ leal rva(pgtable)(%ebx), %eax movl %eax, %cr3 /* Enable Long mode in EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr btsl $_EFER_LME, %eax wrmsr /* After gdt is loaded */ xorl %eax, %eax lldt %ax movl $__BOOT_TSS, %eax ltr %ax /* * Setup for the jump to 64bit mode * * When the jump is performed we will be in long mode but * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use * the new gdt/idt that has __KERNEL_CS with CS.L = 1. * We place all of the values on our mini stack so lret can * used to perform that far jump. 
*/ leal rva(startup_64)(%ebp), %eax #ifdef CONFIG_EFI_MIXED movl rva(efi32_boot_args)(%ebp), %edi testl %edi, %edi jz 1f leal rva(efi64_stub_entry)(%ebp), %eax movl rva(efi32_boot_args+4)(%ebp), %esi movl rva(efi32_boot_args+8)(%ebp), %edx // saved bootparams pointer testl %edx, %edx jnz 1f /* * efi_pe_entry uses MS calling convention, which requires 32 bytes of * shadow space on the stack even if all arguments are passed in * registers. We also need an additional 8 bytes for the space that * would be occupied by the return address, and this also results in * the correct stack alignment for entry. */ subl $40, %esp leal rva(efi_pe_entry)(%ebp), %eax movl %edi, %ecx // MS calling convention movl %esi, %edx 1: #endif /* Check if the C-bit position is correct when SEV is active */ call startup32_check_sev_cbit pushl $__KERNEL_CS pushl %eax /* Enter paged protected Mode, activating Long Mode */ movl $CR0_STATE, %eax movl %eax, %cr0 /* Jump from 32bit compatibility mode into 64bit mode. */ lret SYM_FUNC_END(startup_32) #ifdef CONFIG_EFI_MIXED .org 0x190 SYM_FUNC_START(efi32_stub_entry) add $0x4, %esp /* Discard return address */ popl %ecx popl %edx popl %esi call 1f 1: pop %ebp subl $ rva(1b), %ebp movl %esi, rva(efi32_boot_args+8)(%ebp) SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL) movl %ecx, rva(efi32_boot_args)(%ebp) movl %edx, rva(efi32_boot_args+4)(%ebp) movb $0, rva(efi_is64)(%ebp) /* Save firmware GDTR and code/data selectors */ sgdtl rva(efi32_boot_gdt)(%ebp) movw %cs, rva(efi32_boot_cs)(%ebp) movw %ds, rva(efi32_boot_ds)(%ebp) /* Store firmware IDT descriptor */ sidtl rva(efi32_boot_idt)(%ebp) /* Disable paging */ movl %cr0, %eax btrl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 jmp startup_32 SYM_FUNC_END(efi32_stub_entry) #endif .code64 .org 0x200 SYM_CODE_START(startup_64) /* * 64bit entry is 0x200 and it is ABI so immutable! * We come here either from startup_32 or directly from a * 64bit bootloader. * If we come here from a bootloader, kernel(text+data+bss+brk), * ramdisk, zero_page, command line could be above 4G. * We depend on an identity mapped page table being provided * that maps our entire kernel(text+data+bss+brk), zero page * and command line. */ cld cli /* Setup data segments. */ xorl %eax, %eax movl %eax, %ds movl %eax, %es movl %eax, %ss movl %eax, %fs movl %eax, %gs /* * Compute the decompressed kernel start address. It is where * we were loaded at aligned to a 2M boundary. %rbp contains the * decompressed kernel start address. * * If it is a relocatable kernel then decompress and run the kernel * from load address aligned to 2MB addr, otherwise decompress and * run the kernel from LOAD_PHYSICAL_ADDR * * We cannot rely on the calculation done in 32-bit mode, since we * may have been invoked via the 64-bit entry point. */ /* Start with the delta to where the kernel will run at. */ #ifdef CONFIG_RELOCATABLE leaq startup_32(%rip) /* - $startup_32 */, %rbp #ifdef CONFIG_EFI_STUB /* * If we were loaded via the EFI LoadImage service, startup_32 will be at an * offset to the start of the space allocated for the image. efi_pe_entry will * set up image_offset to tell us where the image actually starts, so that we * can use the full available buffer. * image_offset = startup_32 - image_base * Otherwise image_offset will be zero and has no effect on the calculations. 
*/ movl image_offset(%rip), %eax subq %rax, %rbp #endif movl BP_kernel_alignment(%rsi), %eax decl %eax addq %rax, %rbp notq %rax andq %rax, %rbp cmpq $LOAD_PHYSICAL_ADDR, %rbp jae 1f #endif movq $LOAD_PHYSICAL_ADDR, %rbp 1: /* Target address to relocate to for decompression */ movl BP_init_size(%rsi), %ebx subl $ rva(_end), %ebx addq %rbp, %rbx /* Set up the stack */ leaq rva(boot_stack_end)(%rbx), %rsp /* * At this point we are in long mode with 4-level paging enabled, * but we might want to enable 5-level paging or vice versa. * * The problem is that we cannot do it directly. Setting or clearing * CR4.LA57 in long mode would trigger #GP. So we need to switch off * long mode and paging first. * * We also need a trampoline in lower memory to switch over from * 4- to 5-level paging for cases when the bootloader puts the kernel * above 4G, but didn't enable 5-level paging for us. * * The same trampoline can be used to switch from 5- to 4-level paging * mode, like when starting 4-level paging kernel via kexec() when * original kernel worked in 5-level paging mode. * * For the trampoline, we need the top page table to reside in lower * memory as we don't have a way to load 64-bit values into CR3 in * 32-bit mode. * * We go though the trampoline even if we don't have to: if we're * already in a desired paging mode. This way the trampoline code gets * tested on every boot. */ /* Make sure we have GDT with 32-bit code segment */ leaq gdt64(%rip), %rax addq %rax, 2(%rax) lgdt (%rax) /* Reload CS so IRET returns to a CS actually in the GDT */ pushq $__KERNEL_CS leaq .Lon_kernel_cs(%rip), %rax pushq %rax lretq .Lon_kernel_cs: pushq %rsi call load_stage1_idt popq %rsi #ifdef CONFIG_AMD_MEM_ENCRYPT /* * Now that the stage1 interrupt handlers are set up, #VC exceptions from * CPUID instructions can be properly handled for SEV-ES guests. * * For SEV-SNP, the CPUID table also needs to be set up in advance of any * CPUID instructions being issued, so go ahead and do that now via * sev_enable(), which will also handle the rest of the SEV-related * detection/setup to ensure that has been done in advance of any dependent * code. */ pushq %rsi movq %rsi, %rdi /* real mode address */ call sev_enable popq %rsi #endif /* * paging_prepare() sets up the trampoline and checks if we need to * enable 5-level paging. * * paging_prepare() returns a two-quadword structure which lands * into RDX:RAX: * - Address of the trampoline is returned in RAX. * - Non zero RDX means trampoline needs to enable 5-level * paging. * * RSI holds real mode data and needs to be preserved across * this function call. */ pushq %rsi movq %rsi, %rdi /* real mode address */ call paging_prepare popq %rsi /* Save the trampoline address in RCX */ movq %rax, %rcx /* Set up 32-bit addressable stack */ leaq TRAMPOLINE_32BIT_STACK_END(%rcx), %rsp /* * Preserve live 64-bit registers on the stack: this is necessary * because the architecture does not guarantee that GPRs will retain * their full 64-bit values across a 32-bit mode switch. */ pushq %rbp pushq %rbx pushq %rsi /* * Push the 64-bit address of trampoline_return() onto the new stack. * It will be used by the trampoline to return to the main code. Due to * the 32-bit mode switch, it cannot be kept it in a register either. 
*/ leaq trampoline_return(%rip), %rdi pushq %rdi /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */ pushq $__KERNEL32_CS leaq TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax pushq %rax lretq trampoline_return: /* Restore live 64-bit registers */ popq %rsi popq %rbx popq %rbp /* Restore the stack, the 32-bit trampoline uses its own stack */ leaq rva(boot_stack_end)(%rbx), %rsp /* * cleanup_trampoline() would restore trampoline memory. * * RDI is address of the page table to use instead of page table * in trampoline memory (if required). * * RSI holds real mode data and needs to be preserved across * this function call. */ pushq %rsi leaq rva(top_pgtable)(%rbx), %rdi call cleanup_trampoline popq %rsi /* Zero EFLAGS */ pushq $0 popfq /* * Copy the compressed kernel to the end of our buffer * where decompression in place becomes safe. */ pushq %rsi leaq (_bss-8)(%rip), %rsi leaq rva(_bss-8)(%rbx), %rdi movl $(_bss - startup_32), %ecx shrl $3, %ecx std rep movsq cld popq %rsi /* * The GDT may get overwritten either during the copy we just did or * during extract_kernel below. To avoid any issues, repoint the GDTR * to the new copy of the GDT. */ leaq rva(gdt64)(%rbx), %rax leaq rva(gdt)(%rbx), %rdx movq %rdx, 2(%rax) lgdt (%rax) /* * Jump to the relocated address. */ leaq rva(.Lrelocated)(%rbx), %rax jmp *%rax SYM_CODE_END(startup_64) #ifdef CONFIG_EFI_STUB .org 0x390 SYM_FUNC_START(efi64_stub_entry) and $~0xf, %rsp /* realign the stack */ movq %rdx, %rbx /* save boot_params pointer */ call efi_main movq %rbx,%rsi leaq rva(startup_64)(%rax), %rax jmp *%rax SYM_FUNC_END(efi64_stub_entry) SYM_FUNC_ALIAS(efi_stub_entry, efi64_stub_entry) #endif .text SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) /* * Clear BSS (stack is currently empty) */ xorl %eax, %eax leaq _bss(%rip), %rdi leaq _ebss(%rip), %rcx subq %rdi, %rcx shrq $3, %rcx rep stosq pushq %rsi call load_stage2_idt /* Pass boot_params to initialize_identity_maps() */ movq (%rsp), %rdi call initialize_identity_maps popq %rsi /* * Do the extraction, and jump to the new kernel.. */ pushq %rsi /* Save the real mode argument */ movq %rsi, %rdi /* real mode address */ leaq boot_heap(%rip), %rsi /* malloc area for uncompression */ leaq input_data(%rip), %rdx /* input_data */ movl input_len(%rip), %ecx /* input_len */ movq %rbp, %r8 /* output target address */ movl output_len(%rip), %r9d /* decompressed length, end of relocs */ call extract_kernel /* returns kernel location in %rax */ popq %rsi /* * Jump to the decompressed kernel. */ jmp *%rax SYM_FUNC_END(.Lrelocated) .code32 /* * This is the 32-bit trampoline that will be copied over to low memory. * * Return address is at the top of the stack (might be above 4G). * ECX contains the base address of the trampoline memory. * Non zero RDX means trampoline needs to enable 5-level paging. 
*/ SYM_CODE_START(trampoline_32bit_src) /* Set up data and stack segments */ movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %ss /* Disable paging */ movl %cr0, %eax btrl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 /* Check what paging mode we want to be in after the trampoline */ testl %edx, %edx jz 1f /* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */ movl %cr4, %eax testl $X86_CR4_LA57, %eax jnz 3f jmp 2f 1: /* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */ movl %cr4, %eax testl $X86_CR4_LA57, %eax jz 3f 2: /* Point CR3 to the trampoline's new top level page table */ leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax movl %eax, %cr3 3: /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */ pushl %ecx pushl %edx movl $MSR_EFER, %ecx rdmsr btsl $_EFER_LME, %eax /* Avoid writing EFER if no change was made (for TDX guest) */ jc 1f wrmsr 1: popl %edx popl %ecx #ifdef CONFIG_X86_MCE /* * Preserve CR4.MCE if the kernel will enable #MC support. * Clearing MCE may fault in some environments (that also force #MC * support). Any machine check that occurs before #MC support is fully * configured will crash the system regardless of the CR4.MCE value set * here. */ movl %cr4, %eax andl $X86_CR4_MCE, %eax #else movl $0, %eax #endif /* Enable PAE and LA57 (if required) paging modes */ orl $X86_CR4_PAE, %eax testl %edx, %edx jz 1f orl $X86_CR4_LA57, %eax 1: movl %eax, %cr4 /* Calculate address of paging_enabled() once we are executing in the trampoline */ leal .Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax /* Prepare the stack for far return to Long Mode */ pushl $__KERNEL_CS pushl %eax /* Enable paging again. */ movl %cr0, %eax btsl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 lret SYM_CODE_END(trampoline_32bit_src) .code64 SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled) /* Return from the trampoline */ retq SYM_FUNC_END(.Lpaging_enabled) /* * The trampoline code has a size limit. * Make sure we fail to compile if the trampoline code grows * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes. 
*/ .org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE .code32 SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode) /* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */ 1: hlt jmp 1b SYM_FUNC_END(.Lno_longmode) #include "../../kernel/verify_cpu.S" .data SYM_DATA_START_LOCAL(gdt64) .word gdt_end - gdt - 1 .quad gdt - gdt64 SYM_DATA_END(gdt64) .balign 8 SYM_DATA_START_LOCAL(gdt) .word gdt_end - gdt - 1 .long 0 .word 0 .quad 0x00cf9a000000ffff /* __KERNEL32_CS */ .quad 0x00af9a000000ffff /* __KERNEL_CS */ .quad 0x00cf92000000ffff /* __KERNEL_DS */ .quad 0x0080890000000000 /* TS descriptor */ .quad 0x0000000000000000 /* TS continued */ SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end) SYM_DATA_START(boot_idt_desc) .word boot_idt_end - boot_idt - 1 .quad 0 SYM_DATA_END(boot_idt_desc) .balign 8 SYM_DATA_START(boot_idt) .rept BOOT_IDT_ENTRIES .quad 0 .quad 0 .endr SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end) #ifdef CONFIG_AMD_MEM_ENCRYPT SYM_DATA_START(boot32_idt_desc) .word boot32_idt_end - boot32_idt - 1 .long 0 SYM_DATA_END(boot32_idt_desc) .balign 8 SYM_DATA_START(boot32_idt) .rept 32 .quad 0 .endr SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end) #endif #ifdef CONFIG_EFI_STUB SYM_DATA(image_offset, .long 0) #endif #ifdef CONFIG_EFI_MIXED SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0) SYM_DATA(efi_is64, .byte 1) #define ST32_boottime 60 // offsetof(efi_system_table_32_t, boottime) #define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol) #define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base) __HEAD .code32 SYM_FUNC_START(efi32_pe_entry) /* * efi_status_t efi32_pe_entry(efi_handle_t image_handle, * efi_system_table_32_t *sys_table) */ pushl %ebp movl %esp, %ebp pushl %eax // dummy push to allocate loaded_image pushl %ebx // save callee-save registers pushl %edi call verify_cpu // check for long mode support testl %eax, %eax movl $0x80000003, %eax // EFI_UNSUPPORTED jnz 2f call 1f 1: pop %ebx subl $ rva(1b), %ebx /* Get the loaded image protocol pointer from the image handle */ leal -4(%ebp), %eax pushl %eax // &loaded_image leal rva(loaded_image_proto)(%ebx), %eax pushl %eax // pass the GUID address pushl 8(%ebp) // pass the image handle /* * Note the alignment of the stack frame. * sys_table * handle <-- 16-byte aligned on entry by ABI * return address * frame pointer * loaded_image <-- local variable * saved %ebx <-- 16-byte aligned here * saved %edi * &loaded_image * &loaded_image_proto * handle <-- 16-byte aligned for call to handle_protocol */ movl 12(%ebp), %eax // sys_table movl ST32_boottime(%eax), %eax // sys_table->boottime call *BS32_handle_protocol(%eax) // sys_table->boottime->handle_protocol addl $12, %esp // restore argument space testl %eax, %eax jnz 2f movl 8(%ebp), %ecx // image_handle movl 12(%ebp), %edx // sys_table movl -4(%ebp), %esi // loaded_image movl LI32_image_base(%esi), %esi // loaded_image->image_base movl %ebx, %ebp // startup_32 for efi32_pe_stub_entry /* * We need to set the image_offset variable here since startup_32() will * use it before we get to the 64-bit efi_pe_entry() in C code. 
*/ subl %esi, %ebx movl %ebx, rva(image_offset)(%ebp) // save image_offset jmp efi32_pe_stub_entry 2: popl %edi // restore callee-save registers popl %ebx leave RET SYM_FUNC_END(efi32_pe_entry) .section ".rodata" /* EFI loaded image protocol GUID */ .balign 4 SYM_DATA_START_LOCAL(loaded_image_proto) .long 0x5b1b31a1 .word 0x9562, 0x11d2 .byte 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b SYM_DATA_END(loaded_image_proto) #endif #ifdef CONFIG_AMD_MEM_ENCRYPT __HEAD .code32 /* * Write an IDT entry into boot32_idt * * Parameters: * * %eax: Handler address * %edx: Vector number * * Physical offset is expected in %ebp */ SYM_FUNC_START(startup32_set_idt_entry) push %ebx push %ecx /* IDT entry address to %ebx */ leal rva(boot32_idt)(%ebp), %ebx shl $3, %edx addl %edx, %ebx /* Build IDT entry, lower 4 bytes */ movl %eax, %edx andl $0x0000ffff, %edx # Target code segment offset [15:0] movl $__KERNEL32_CS, %ecx # Target code segment selector shl $16, %ecx orl %ecx, %edx /* Store lower 4 bytes to IDT */ movl %edx, (%ebx) /* Build IDT entry, upper 4 bytes */ movl %eax, %edx andl $0xffff0000, %edx # Target code segment offset [31:16] orl $0x00008e00, %edx # Present, Type 32-bit Interrupt Gate /* Store upper 4 bytes to IDT */ movl %edx, 4(%ebx) pop %ecx pop %ebx RET SYM_FUNC_END(startup32_set_idt_entry) #endif SYM_FUNC_START(startup32_load_idt) #ifdef CONFIG_AMD_MEM_ENCRYPT /* #VC handler */ leal rva(startup32_vc_handler)(%ebp), %eax movl $X86_TRAP_VC, %edx call startup32_set_idt_entry /* Load IDT */ leal rva(boot32_idt)(%ebp), %eax movl %eax, rva(boot32_idt_desc+2)(%ebp) lidt rva(boot32_idt_desc)(%ebp) #endif RET SYM_FUNC_END(startup32_load_idt) /* * Check for the correct C-bit position when the startup_32 boot-path is used. * * The check makes use of the fact that all memory is encrypted when paging is * disabled. The function creates 64 bits of random data using the RDRAND * instruction. RDRAND is mandatory for SEV guests, so always available. If the * hypervisor violates that the kernel will crash right here. * * The 64 bits of random data are stored to a memory location and at the same * time kept in the %eax and %ebx registers. Since encryption is always active * when paging is off the random data will be stored encrypted in main memory. * * Then paging is enabled. When the C-bit position is correct all memory is * still mapped encrypted and comparing the register values with memory will * succeed. An incorrect C-bit position will map all memory unencrypted, so that * the compare will use the encrypted random data and fail. */ SYM_FUNC_START(startup32_check_sev_cbit) #ifdef CONFIG_AMD_MEM_ENCRYPT pushl %eax pushl %ebx pushl %ecx pushl %edx /* Check for non-zero sev_status */ movl rva(sev_status)(%ebp), %eax testl %eax, %eax jz 4f /* * Get two 32-bit random values - Don't bail out if RDRAND fails * because it is better to prevent forward progress if no random value * can be gathered. 
*/ 1: rdrand %eax jnc 1b 2: rdrand %ebx jnc 2b /* Store to memory and keep it in the registers */ movl %eax, rva(sev_check_data)(%ebp) movl %ebx, rva(sev_check_data+4)(%ebp) /* Enable paging to see if encryption is active */ movl %cr0, %edx /* Backup %cr0 in %edx */ movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */ movl %ecx, %cr0 cmpl %eax, rva(sev_check_data)(%ebp) jne 3f cmpl %ebx, rva(sev_check_data+4)(%ebp) jne 3f movl %edx, %cr0 /* Restore previous %cr0 */ jmp 4f 3: /* Check failed - hlt the machine */ hlt jmp 3b 4: popl %edx popl %ecx popl %ebx popl %eax #endif RET SYM_FUNC_END(startup32_check_sev_cbit) /* * Stack and heap for uncompression */ .bss .balign 4 SYM_DATA_LOCAL(boot_heap, .fill BOOT_HEAP_SIZE, 1, 0) SYM_DATA_START_LOCAL(boot_stack) .fill BOOT_STACK_SIZE, 1, 0 .balign 16 SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end) /* * Space for page tables (not in .bss so not zeroed) */ .section ".pgtable","aw",@nobits .balign 4096 SYM_DATA_LOCAL(pgtable, .fill BOOT_PGT_SIZE, 1, 0) /* * The page table is going to be used instead of page table in the trampoline * memory. */ SYM_DATA_LOCAL(top_pgtable, .fill PAGE_SIZE, 1, 0)
aixcc-public/challenge-001-exemplar-source
1,291
arch/x86/boot/compressed/idt_handlers_64.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Early IDT handler entry points
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#include <asm/segment.h>

/* For ORIG_RAX */
#include "../../entry/calling.h"

.macro EXCEPTION_HANDLER name function error_code=0
SYM_FUNC_START(\name)

	/* Build pt_regs */
	.if \error_code == 0
	pushq	$0
	.endif

	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rax
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11
	pushq	%rbx
	pushq	%rbp
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* Call handler with pt_regs */
	movq	%rsp, %rdi
	/* Error code is second parameter */
	movq	ORIG_RAX(%rsp), %rsi
	call	\function

	/* Restore regs */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbp
	popq	%rbx
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi

	/* Remove error code and return */
	addq	$8, %rsp

	iretq
SYM_FUNC_END(\name)
	.endm

	.text
	.code64

EXCEPTION_HANDLER	boot_page_fault do_boot_page_fault error_code=1

#ifdef CONFIG_AMD_MEM_ENCRYPT
EXCEPTION_HANDLER	boot_stage1_vc do_vc_no_ghcb error_code=1
EXCEPTION_HANDLER	boot_stage2_vc do_boot_stage2_vc error_code=1
#endif
aixcc-public/challenge-001-exemplar-source
3,674
arch/x86/entry/vdso/vsgx.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/export.h> #include <asm/errno.h> #include <asm/enclu.h> #include "extable.h" /* Relative to %rbp. */ #define SGX_ENCLAVE_OFFSET_OF_RUN 16 /* The offsets relative to struct sgx_enclave_run. */ #define SGX_ENCLAVE_RUN_TCS 0 #define SGX_ENCLAVE_RUN_LEAF 8 #define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR 12 #define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE 14 #define SGX_ENCLAVE_RUN_EXCEPTION_ADDR 16 #define SGX_ENCLAVE_RUN_USER_HANDLER 24 #define SGX_ENCLAVE_RUN_USER_DATA 32 /* not used */ #define SGX_ENCLAVE_RUN_RESERVED_START 40 #define SGX_ENCLAVE_RUN_RESERVED_END 256 .code64 .section .text, "ax" SYM_FUNC_START(__vdso_sgx_enter_enclave) /* Prolog */ .cfi_startproc push %rbp .cfi_adjust_cfa_offset 8 .cfi_rel_offset %rbp, 0 mov %rsp, %rbp .cfi_def_cfa_register %rbp push %rbx .cfi_rel_offset %rbx, -8 mov %ecx, %eax .Lenter_enclave: /* EENTER <= function <= ERESUME */ cmp $EENTER, %eax jb .Linvalid_input cmp $ERESUME, %eax ja .Linvalid_input mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx /* Validate that the reserved area contains only zeros. */ mov $SGX_ENCLAVE_RUN_RESERVED_START, %rbx 1: cmpq $0, (%rcx, %rbx) jne .Linvalid_input add $8, %rbx cmpq $SGX_ENCLAVE_RUN_RESERVED_END, %rbx jne 1b /* Load TCS and AEP */ mov SGX_ENCLAVE_RUN_TCS(%rcx), %rbx lea .Lasync_exit_pointer(%rip), %rcx /* Single ENCLU serving as both EENTER and AEP (ERESUME) */ .Lasync_exit_pointer: .Lenclu_eenter_eresume: enclu /* EEXIT jumps here unless the enclave is doing something fancy. */ mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx /* Set exit_reason. */ movl $EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx) /* Invoke userspace's exit handler if one was provided. */ .Lhandle_exit: cmpq $0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx) jne .Linvoke_userspace_handler /* Success, in the sense that ENCLU was attempted. */ xor %eax, %eax .Lout: pop %rbx leave .cfi_def_cfa %rsp, 8 RET /* The out-of-line code runs with the pre-leave stack frame. */ .cfi_def_cfa %rbp, 16 .Linvalid_input: mov $(-EINVAL), %eax jmp .Lout .Lhandle_exception: mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx /* Set the exception info. */ mov %eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx) mov %di, (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx) mov %si, (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx) mov %rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx) jmp .Lhandle_exit .Linvoke_userspace_handler: /* Pass the untrusted RSP (at exit) to the callback via %rcx. */ mov %rsp, %rcx /* Save struct sgx_enclave_exception %rbx is about to be clobbered. */ mov %rbx, %rax /* Save the untrusted RSP offset in %rbx (non-volatile register). */ mov %rsp, %rbx and $0xf, %rbx /* * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned * _after_ pushing the parameters on the stack, hence the bonus push. */ and $-0x10, %rsp push %rax /* Push struct sgx_enclave_exception as a param to the callback. */ push %rax /* Clear RFLAGS.DF per x86_64 ABI */ cld /* * Load the callback pointer to %rax and lfence for LVI (load value * injection) protection before making the call. */ mov SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax lfence call *%rax /* Undo the post-exit %rsp adjustment. */ lea 0x10(%rsp, %rbx), %rsp /* * If the return from callback is zero or negative, return immediately, * else re-execute ENCLU with the positive return value interpreted as * the requested ENCLU function. */ cmp $0, %eax jle .Lout jmp .Lenter_enclave .cfi_endproc _ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception) SYM_FUNC_END(__vdso_sgx_enter_enclave)
aixcc-public/challenge-001-exemplar-source
2,659
arch/x86/entry/vdso/vdso-layout.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/vdso.h> /* * Linker script for vDSO. This is an ELF shared object prelinked to * its virtual address, and with only one read-only segment. * This script controls its layout. */ SECTIONS { /* * User/kernel shared data is before the vDSO. This may be a little * uglier than putting it after the vDSO, but it avoids issues with * non-allocatable things that dangle past the end of the PT_LOAD * segment. */ vvar_start = . - 4 * PAGE_SIZE; vvar_page = vvar_start; /* Place all vvars at the offsets in asm/vvar.h. */ #define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset; #include <asm/vvar.h> #undef EMIT_VVAR pvclock_page = vvar_start + PAGE_SIZE; hvclock_page = vvar_start + 2 * PAGE_SIZE; timens_page = vvar_start + 3 * PAGE_SIZE; #undef _ASM_X86_VVAR_H /* Place all vvars in timens too at the offsets in asm/vvar.h. */ #define EMIT_VVAR(name, offset) timens_ ## name = timens_page + offset; #include <asm/vvar.h> #undef EMIT_VVAR . = SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .dynamic : { *(.dynamic) } :text :dynamic .rodata : { *(.rodata*) *(.data*) *(.sdata*) *(.got.plt) *(.got) *(.gnu.linkonce.d.*) *(.bss*) *(.dynbss*) *(.gnu.linkonce.b.*) } :text /* * Discard .note.gnu.property sections which are unused and have * different alignment requirement from vDSO note sections. */ /DISCARD/ : { *(.note.gnu.property) } .note : { *(.note.*) } :text :note .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text /* * Text is well-separated from actual data: there's plenty of * stuff that isn't used at runtime in between. */ .text : { *(.text*) } :text =0x90909090, .altinstructions : { *(.altinstructions) } :text .altinstr_replacement : { *(.altinstr_replacement) } :text __ex_table : { *(__ex_table) } :text /DISCARD/ : { *(.discard) *(.discard.*) *(__bug_table) } } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; }
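The script places a four-page vvar window immediately below the vDSO image. The arithmetic is easy to lose among the symbols, so here is a small illustrative C sketch (not kernel code, 4 KiB pages assumed) of where each page ends up relative to a mapped vDSO base:

#include <stdint.h>

#define PAGE_SIZE 4096UL

struct vdso_window {
        uintptr_t vvar_page;        /* shared kernel/user data      */
        uintptr_t pvclock_page;     /* paravirt clock page          */
        uintptr_t hvclock_page;     /* Hyper-V clock page           */
        uintptr_t timens_page;      /* time-namespace vvar alias    */
};

static struct vdso_window vdso_window(uintptr_t vdso_base)
{
        uintptr_t vvar_start = vdso_base - 4 * PAGE_SIZE;

        return (struct vdso_window){
                .vvar_page    = vvar_start,
                .pvclock_page = vvar_start + 1 * PAGE_SIZE,
                .hvclock_page = vvar_start + 2 * PAGE_SIZE,
                .timens_page  = vvar_start + 3 * PAGE_SIZE,
        };
}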
aixcc-public/challenge-001-exemplar-source
4,984
arch/x86/entry/vdso/vdso32/sigreturn.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/unistd_32.h> #include <asm/asm-offsets.h> #ifndef SYSCALL_ENTER_KERNEL #define SYSCALL_ENTER_KERNEL int $0x80 #endif .text .globl __kernel_sigreturn .type __kernel_sigreturn,@function nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ ALIGN __kernel_sigreturn: .LSTART_sigreturn: popl %eax /* XXX does this mean it needs unwind info? */ movl $__NR_sigreturn, %eax SYSCALL_ENTER_KERNEL .LEND_sigreturn: SYM_INNER_LABEL(vdso32_sigreturn_landing_pad, SYM_L_GLOBAL) nop .size __kernel_sigreturn,.-.LSTART_sigreturn .globl __kernel_rt_sigreturn .type __kernel_rt_sigreturn,@function ALIGN __kernel_rt_sigreturn: .LSTART_rt_sigreturn: movl $__NR_rt_sigreturn, %eax SYSCALL_ENTER_KERNEL .LEND_rt_sigreturn: SYM_INNER_LABEL(vdso32_rt_sigreturn_landing_pad, SYM_L_GLOBAL) nop .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn .previous .section .eh_frame,"a",@progbits .LSTARTFRAMEDLSI1: .long .LENDCIEDLSI1-.LSTARTCIEDLSI1 .LSTARTCIEDLSI1: .long 0 /* CIE ID */ .byte 1 /* Version number */ .string "zRS" /* NUL-terminated augmentation string */ .uleb128 1 /* Code alignment factor */ .sleb128 -4 /* Data alignment factor */ .byte 8 /* Return address register column */ .uleb128 1 /* Augmentation value length */ .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */ .byte 0 /* DW_CFA_nop */ .align 4 .LENDCIEDLSI1: .long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */ .LSTARTFDEDLSI1: .long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */ /* HACK: The dwarf2 unwind routines will subtract 1 from the return address to get an address in the middle of the presumed call instruction. Since we didn't get here via a call, we need to include the nop before the real start to make up for it. */ .long .LSTART_sigreturn-1-. /* PC-relative start address */ .long .LEND_sigreturn-.LSTART_sigreturn+1 .uleb128 0 /* Augmentation */ /* What follows are the instructions for the table generation. We record the locations of each register saved. This is complicated by the fact that the "CFA" is always assumed to be the value of the stack pointer in the caller. This means that we must define the CFA of this body of code to be the saved value of the stack pointer in the sigcontext. Which also means that there is no fixed relation to the other saved registers, which means that we must use DW_CFA_expression to compute their addresses. It also means that when we adjust the stack with the popl, we have to do it all over again. */ #define do_cfa_expr(offset) \ .byte 0x0f; /* DW_CFA_def_cfa_expression */ \ .uleb128 1f-0f; /* length */ \ 0: .byte 0x74; /* DW_OP_breg4 */ \ .sleb128 offset; /* offset */ \ .byte 0x06; /* DW_OP_deref */ \ 1: #define do_expr(regno, offset) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno; /* regno */ \ .uleb128 1f-0f; /* length */ \ 0: .byte 0x74; /* DW_OP_breg4 */ \ .sleb128 offset; /* offset */ \ 1: do_cfa_expr(IA32_SIGCONTEXT_sp+4) do_expr(0, IA32_SIGCONTEXT_ax+4) do_expr(1, IA32_SIGCONTEXT_cx+4) do_expr(2, IA32_SIGCONTEXT_dx+4) do_expr(3, IA32_SIGCONTEXT_bx+4) do_expr(5, IA32_SIGCONTEXT_bp+4) do_expr(6, IA32_SIGCONTEXT_si+4) do_expr(7, IA32_SIGCONTEXT_di+4) do_expr(8, IA32_SIGCONTEXT_ip+4) .byte 0x42 /* DW_CFA_advance_loc 2 -- nop; popl eax. 
*/ do_cfa_expr(IA32_SIGCONTEXT_sp) do_expr(0, IA32_SIGCONTEXT_ax) do_expr(1, IA32_SIGCONTEXT_cx) do_expr(2, IA32_SIGCONTEXT_dx) do_expr(3, IA32_SIGCONTEXT_bx) do_expr(5, IA32_SIGCONTEXT_bp) do_expr(6, IA32_SIGCONTEXT_si) do_expr(7, IA32_SIGCONTEXT_di) do_expr(8, IA32_SIGCONTEXT_ip) .align 4 .LENDFDEDLSI1: .long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */ .LSTARTFDEDLSI2: .long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */ /* HACK: See above wrt unwind library assumptions. */ .long .LSTART_rt_sigreturn-1-. /* PC-relative start address */ .long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1 .uleb128 0 /* Augmentation */ /* What follows are the instructions for the table generation. We record the locations of each register saved. This is slightly less complicated than the above, since we don't modify the stack pointer in the process. */ do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_sp) do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ax) do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_cx) do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_dx) do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bx) do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bp) do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_si) do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_di) do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ip) .align 4 .LENDFDEDLSI2: .previous
aixcc-public/challenge-001-exemplar-source
2,429
arch/x86/entry/vdso/vdso32/system_call.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * AT_SYSINFO entry point */ #include <linux/linkage.h> #include <asm/dwarf2.h> #include <asm/cpufeatures.h> #include <asm/alternative.h> .text .globl __kernel_vsyscall .type __kernel_vsyscall,@function ALIGN __kernel_vsyscall: CFI_STARTPROC /* * Reshuffle regs so that all of any of the entry instructions * will preserve enough state. * * A really nice entry sequence would be: * pushl %edx * pushl %ecx * movl %esp, %ecx * * Unfortunately, naughty Android versions between July and December * 2015 actually hardcode the traditional Linux SYSENTER entry * sequence. That is severely broken for a number of reasons (ask * anyone with an AMD CPU, for example). Nonetheless, we try to keep * it working approximately as well as it ever worked. * * This link may elucidate some of the history: * https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7 * personally, I find it hard to understand what's going on there. * * Note to future user developers: DO NOT USE SYSENTER IN YOUR CODE. * Execute an indirect call to the address in the AT_SYSINFO auxv * entry. That is the ONLY correct way to make a fast 32-bit system * call on Linux. (Open-coding int $0x80 is also fine, but it's * slow.) */ pushl %ecx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ecx, 0 pushl %edx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET edx, 0 pushl %ebp CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ebp, 0 #define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter" #define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall" #ifdef CONFIG_X86_64 /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */ ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \ SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32 #else ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP #endif /* Enter using int $0x80 */ int $0x80 SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL) /* * Restore EDX and ECX in case they were clobbered. EBP is not * clobbered (the kernel restores it), but it's cleaner and * probably faster to pop it than to adjust ESP using addl. */ popl %ebp CFI_RESTORE ebp CFI_ADJUST_CFA_OFFSET -4 popl %edx CFI_RESTORE edx CFI_ADJUST_CFA_OFFSET -4 popl %ecx CFI_RESTORE ecx CFI_ADJUST_CFA_OFFSET -4 RET CFI_ENDPROC .size __kernel_vsyscall,.-__kernel_vsyscall .previous
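The comment block above is emphatic that 32-bit userspace should reach this entry point through the AT_SYSINFO auxv value rather than open-coding SYSENTER. A hedged sketch of such a caller follows; it must be built as 32-bit x86 code, uses the ordinary int $0x80 register convention (eax = number, ebx = first argument), and assumes glibc's getauxval() plus the AT_SYSINFO constant from <elf.h>. Syscall number 20 (getpid) is used purely as an example.

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

static long vsyscall1(long nr, long arg1)
{
        long ret;
        void *vsys = (void *)getauxval(AT_SYSINFO);

        if (!vsys)
                return -1;      /* real code would fall back to int $0x80 */

        asm volatile("call *%[vsys]"
                     : "=a" (ret)
                     : "a" (nr), "b" (arg1), [vsys] "r" (vsys)
                     : "memory");
        return ret;
}

int main(void)
{
        /* __NR_getpid is 20 on 32-bit x86; the argument is ignored. */
        printf("pid: %ld\n", vsyscall1(20, 0));
        return 0;
}

As the push/pop sequence above shows, the stub itself preserves %ecx, %edx and %ebp, so from the caller's point of view only %eax changes.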
aixcc-public/challenge-001-exemplar-source
2,555
arch/x86/virt/vmx/tdx/tdxcall.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/asm-offsets.h> #include <asm/tdx.h> /* * TDCALL and SEAMCALL are supported in Binutils >= 2.36. */ #define tdcall .byte 0x66,0x0f,0x01,0xcc #define seamcall .byte 0x66,0x0f,0x01,0xcf /* * TDX_MODULE_CALL - common helper macro for both * TDCALL and SEAMCALL instructions. * * TDCALL - used by TDX guests to make requests to the * TDX module and hypercalls to the VMM. * SEAMCALL - used by TDX hosts to make requests to the * TDX module. */ .macro TDX_MODULE_CALL host:req /* * R12 will be used as temporary storage for struct tdx_module_output * pointer. Since R12-R15 registers are not used by TDCALL/SEAMCALL * services supported by this function, it can be reused. */ /* Callee saved, so preserve it */ push %r12 /* * Push output pointer to stack. * After the operation, it will be fetched into R12 register. */ push %r9 /* Mangle function call ABI into TDCALL/SEAMCALL ABI: */ /* Move Leaf ID to RAX */ mov %rdi, %rax /* Move input 4 to R9 */ mov %r8, %r9 /* Move input 3 to R8 */ mov %rcx, %r8 /* Move input 1 to RCX */ mov %rsi, %rcx /* Leave input param 2 in RDX */ .if \host seamcall /* * SEAMCALL instruction is essentially a VMExit from VMX root * mode to SEAM VMX root mode. VMfailInvalid (CF=1) indicates * that the targeted SEAM firmware is not loaded or disabled, * or P-SEAMLDR is busy with another SEAMCALL. %rax is not * changed in this case. * * Set %rax to TDX_SEAMCALL_VMFAILINVALID for VMfailInvalid. * This value will never be used as actual SEAMCALL error code as * it is from the Reserved status code class. */ jnc .Lno_vmfailinvalid mov $TDX_SEAMCALL_VMFAILINVALID, %rax .Lno_vmfailinvalid: .else tdcall .endif /* * Fetch output pointer from stack to R12 (It is used * as temporary storage) */ pop %r12 /* * Since this macro can be invoked with NULL as an output pointer, * check if caller provided an output struct before storing output * registers. * * Update output registers, even if the call failed (RAX != 0). * Other registers may contain details of the failure. */ test %r12, %r12 jz .Lno_output_struct /* Copy result registers to output struct: */ movq %rcx, TDX_MODULE_rcx(%r12) movq %rdx, TDX_MODULE_rdx(%r12) movq %r8, TDX_MODULE_r8(%r12) movq %r9, TDX_MODULE_r9(%r12) movq %r10, TDX_MODULE_r10(%r12) movq %r11, TDX_MODULE_r11(%r12) .Lno_output_struct: /* Restore the state of R12 register */ pop %r12 .endm
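The macro stores its results through asm-offsets into a six-member output structure and takes the leaf ID plus four inputs from the regular calling convention. The C-side sketch below mirrors that ABI; the struct layout and the __tdx_module_call() prototype are written to match the register shuffling in the macro rather than copied from a header, and the leaf constant is purely illustrative.

#include <stdint.h>

typedef uint64_t u64;

struct tdx_module_output {
        u64 rcx;
        u64 rdx;
        u64 r8;
        u64 r9;
        u64 r10;
        u64 r11;
};

/*
 * Implemented in assembly as "TDX_MODULE_CALL host=0":
 *   rdi = leaf id   -> RAX
 *   rsi = input 1   -> RCX
 *   rdx = input 2   -> RDX (left in place)
 *   rcx = input 3   -> R8
 *   r8  = input 4   -> R9
 *   r9  = &out      (may be NULL when the caller ignores the outputs)
 * Returns the completion status from RAX.
 */
u64 __tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9,
                      struct tdx_module_output *out);

static u64 tdx_example_call(struct tdx_module_output *out)
{
        /* Hypothetical leaf number, used only to show the shape of a call. */
        const u64 EXAMPLE_LEAF = 1;

        return __tdx_module_call(EXAMPLE_LEAF, 0, 0, 0, 0, out);
}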
aixcc-public/challenge-001-exemplar-source
5,120
arch/csky/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/linkage.h> #include <abi/entry.h> #include <abi/pgtable-bits.h> #include <asm/errno.h> #include <asm/setup.h> #include <asm/unistd.h> #include <asm/asm-offsets.h> #include <linux/threads.h> #include <asm/page.h> #include <asm/thread_info.h> .macro zero_fp #ifdef CONFIG_STACKTRACE movi r8, 0 #endif .endm .macro context_tracking #ifdef CONFIG_CONTEXT_TRACKING_USER mfcr a0, epsr btsti a0, 31 bt 1f jbsr user_exit_callable ldw a0, (sp, LSAVE_A0) ldw a1, (sp, LSAVE_A1) ldw a2, (sp, LSAVE_A2) ldw a3, (sp, LSAVE_A3) #if defined(__CSKYABIV1__) ldw r6, (sp, LSAVE_A4) ldw r7, (sp, LSAVE_A5) #endif 1: #endif .endm .text ENTRY(csky_pagefault) SAVE_ALL 0 zero_fp context_tracking psrset ee mov a0, sp jbsr do_page_fault jmpi ret_from_exception ENTRY(csky_systemcall) SAVE_ALL TRAP0_SIZE zero_fp context_tracking psrset ee, ie lrw r9, __NR_syscalls cmphs syscallid, r9 /* Check nr of syscall */ bt 1f lrw r9, sys_call_table ixw r9, syscallid ldw syscallid, (r9) cmpnei syscallid, 0 bf ret_from_exception mov r9, sp bmaski r10, THREAD_SHIFT andn r9, r10 ldw r10, (r9, TINFO_FLAGS) lrw r9, _TIF_SYSCALL_WORK and r10, r9 cmpnei r10, 0 bt csky_syscall_trace #if defined(__CSKYABIV2__) subi sp, 8 stw r5, (sp, 0x4) stw r4, (sp, 0x0) jsr syscallid /* Do system call */ addi sp, 8 #else jsr syscallid #endif stw a0, (sp, LSAVE_A0) /* Save return value */ 1: #ifdef CONFIG_DEBUG_RSEQ mov a0, sp jbsr rseq_syscall #endif jmpi ret_from_exception csky_syscall_trace: mov a0, sp /* sp = pt_regs pointer */ jbsr syscall_trace_enter cmpnei a0, 0 bt 1f /* Prepare args before do system call */ ldw a0, (sp, LSAVE_A0) ldw a1, (sp, LSAVE_A1) ldw a2, (sp, LSAVE_A2) ldw a3, (sp, LSAVE_A3) #if defined(__CSKYABIV2__) subi sp, 8 ldw r9, (sp, LSAVE_A4) stw r9, (sp, 0x0) ldw r9, (sp, LSAVE_A5) stw r9, (sp, 0x4) jsr syscallid /* Do system call */ addi sp, 8 #else ldw r6, (sp, LSAVE_A4) ldw r7, (sp, LSAVE_A5) jsr syscallid /* Do system call */ #endif stw a0, (sp, LSAVE_A0) /* Save return value */ 1: #ifdef CONFIG_DEBUG_RSEQ mov a0, sp jbsr rseq_syscall #endif mov a0, sp /* right now, sp --> pt_regs */ jbsr syscall_trace_exit br ret_from_exception ENTRY(ret_from_kernel_thread) jbsr schedule_tail mov a0, r10 jsr r9 jbsr ret_from_exception ENTRY(ret_from_fork) jbsr schedule_tail mov r9, sp bmaski r10, THREAD_SHIFT andn r9, r10 ldw r10, (r9, TINFO_FLAGS) lrw r9, _TIF_SYSCALL_WORK and r10, r9 cmpnei r10, 0 bf ret_from_exception mov a0, sp /* sp = pt_regs pointer */ jbsr syscall_trace_exit ret_from_exception: psrclr ie ld r9, (sp, LSAVE_PSR) btsti r9, 31 bt 1f /* * Load address of current->thread_info, Then get address of task_struct * Get task_needreshed in task_struct */ mov r9, sp bmaski r10, THREAD_SHIFT andn r9, r10 ldw r10, (r9, TINFO_FLAGS) lrw r9, _TIF_WORK_MASK and r10, r9 cmpnei r10, 0 bt exit_work #ifdef CONFIG_CONTEXT_TRACKING_USER jbsr user_enter_callable #endif 1: #ifdef CONFIG_PREEMPTION mov r9, sp bmaski r10, THREAD_SHIFT andn r9, r10 ldw r10, (r9, TINFO_PREEMPT) cmpnei r10, 0 bt 2f jbsr preempt_schedule_irq /* irq en/disable is done inside */ 2: #endif #ifdef CONFIG_TRACE_IRQFLAGS ld r10, (sp, LSAVE_PSR) btsti r10, 6 bf 2f jbsr trace_hardirqs_on 2: #endif RESTORE_ALL exit_work: lrw r9, ret_from_exception mov lr, r9 btsti r10, TIF_NEED_RESCHED bt work_resched psrset ie mov a0, sp mov a1, r10 jmpi do_notify_resume work_resched: jmpi schedule ENTRY(csky_trap) SAVE_ALL 0 zero_fp context_tracking psrset ee mov a0, sp /* Push Stack 
pointer arg */ jbsr trap_c /* Call C-level trap handler */ jmpi ret_from_exception /*  * Prototype from libc for abiv1:  * register unsigned int __result asm("a0");  * asm( "trap 3" :"=r"(__result)::);  */ ENTRY(csky_get_tls) USPTOKSP RD_MEH a0 WR_MEH a0 /* increase epc for continue */ mfcr a0, epc addi a0, TRAP0_SIZE mtcr a0, epc /* get current task thread_info with kernel 8K stack */ bmaski a0, THREAD_SHIFT not a0 subi sp, 1 and a0, sp addi sp, 1 /* get tls */ ldw a0, (a0, TINFO_TP_VALUE) KSPTOUSP rte ENTRY(csky_irq) SAVE_ALL 0 zero_fp context_tracking psrset ee #ifdef CONFIG_TRACE_IRQFLAGS jbsr trace_hardirqs_off #endif mov a0, sp jbsr generic_handle_arch_irq jmpi ret_from_exception /* * a0 = prev task_struct * * a1 = next task_struct * * a0 = return next */ ENTRY(__switch_to) lrw a3, TASK_THREAD addu a3, a0 SAVE_SWITCH_STACK stw sp, (a3, THREAD_KSP) /* Set up next process to run */ lrw a3, TASK_THREAD addu a3, a1 ldw sp, (a3, THREAD_KSP) /* Set next kernel sp */ #if defined(__CSKYABIV2__) addi a3, a1, TASK_THREAD_INFO ldw tls, (a3, TINFO_TP_VALUE) #endif RESTORE_SWITCH_STACK rts ENDPROC(__switch_to)
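csky_systemcall bounds-checks the syscall number against __NR_syscalls, takes the tracing slow path when any _TIF_SYSCALL_WORK bit is set in the thread flags (found by masking the stack pointer down to thread_info), and finally stores the return value back into the saved a0 slot. A pseudo-C sketch of that control flow is below; the types, table size and flag value are simplified stand-ins, not the real kernel definitions.

#include <stddef.h>

#define NR_SYSCALLS             440UL   /* stand-in for __NR_syscalls     */
#define TIF_SYSCALL_WORK        0x01UL  /* stand-in for _TIF_SYSCALL_WORK */

typedef long (*syscall_fn)(long, long, long, long, long, long);

static syscall_fn sys_call_table[NR_SYSCALLS];  /* stand-in table */

struct saved_regs {
        long a[6];              /* LSAVE_A0..LSAVE_A5                      */
        unsigned long flags;    /* really lives in thread_info, reached by
                                   masking the kernel stack pointer        */
};

static void syscall_dispatch(struct saved_regs *regs, unsigned long nr)
{
        syscall_fn fn;

        if (nr >= NR_SYSCALLS)          /* cmphs syscallid, __NR_syscalls */
                return;

        fn = sys_call_table[nr];
        if (!fn)                        /* cmpnei syscallid, 0            */
                return;

        if (regs->flags & TIF_SYSCALL_WORK) {
                /* Slow path: syscall_trace_enter()/_exit() bracket the call. */
        }

        /* Fast path: call with the saved argument registers, store result. */
        regs->a[0] = fn(regs->a[0], regs->a[1], regs->a[2],
                        regs->a[3], regs->a[4], regs->a[5]);
}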
aixcc-public/challenge-001-exemplar-source
1,990
arch/csky/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/vmlinux.lds.h> #include <asm/page.h> #include <asm/memory.h> OUTPUT_ARCH(csky) ENTRY(_start) #ifndef __cskyBE__ jiffies = jiffies_64; #else jiffies = jiffies_64 + 4; #endif #define VBR_BASE \ . = ALIGN(1024); \ vec_base = .; \ . += 512; SECTIONS { . = PAGE_OFFSET + PHYS_OFFSET_OFFSET; _start = .; HEAD_TEXT_SECTION . = ALIGN(PAGE_SIZE); .text : AT(ADDR(.text) - LOAD_OFFSET) { _text = .; _stext = .; VBR_BASE IRQENTRY_TEXT SOFTIRQENTRY_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT *(.fixup) *(.gnu.warning) } = 0 _etext = .; /* __init_begin __init_end must be page aligned for free_initmem */ . = ALIGN(PAGE_SIZE); __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(PAGE_SIZE) PERCPU_SECTION(L1_CACHE_BYTES) . = ALIGN(PAGE_SIZE); __init_end = .; _sdata = .; RO_DATA(PAGE_SIZE) RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; #ifdef CONFIG_HAVE_TCM .tcm_start : { . = ALIGN(PAGE_SIZE); __tcm_start = .; } .text_data_tcm FIXADDR_TCM : AT(__tcm_start) { . = ALIGN(4); __stcm_text_data = .; *(.tcm.text) *(.tcm.rodata) #ifndef CONFIG_HAVE_DTCM *(.tcm.data) #endif . = ALIGN(4); __etcm_text_data = .; } . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm); #ifdef CONFIG_HAVE_DTCM #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE .dtcm_start : { __dtcm_start = .; } .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start) { . = ALIGN(4); __stcm_data = .; *(.tcm.data) . = ALIGN(4); __etcm_data = .; } . = ADDR(.dtcm_start) + SIZEOF(.data_tcm); .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) { #else .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) { #endif . = ALIGN(PAGE_SIZE); __tcm_end = .; } #endif EXCEPTION_TABLE(L1_CACHE_BYTES) BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) _end = . ; STABS_DEBUG DWARF_DEBUG ELF_DETAILS DISCARDS }
aixcc-public/challenge-001-exemplar-source
1,945
arch/csky/abiv2/memcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/linkage.h> #include "sysdep.h" ENTRY(__memcpy) ENTRY(memcpy) /* Test if len less than 4 bytes. */ mov r12, r0 cmplti r2, 4 bt .L_copy_by_byte andi r13, r0, 3 movi r19, 4 /* Test if dest is not 4 bytes aligned. */ bnez r13, .L_dest_not_aligned /* Hardware can handle unaligned access directly. */ .L_dest_aligned: /* If dest is aligned, then copy. */ zext r18, r2, 31, 4 /* Test if len less than 16 bytes. */ bez r18, .L_len_less_16bytes movi r19, 0 LABLE_ALIGN .L_len_larger_16bytes: #if defined(__CK860__) ldw r3, (r1, 0) stw r3, (r0, 0) ldw r3, (r1, 4) stw r3, (r0, 4) ldw r3, (r1, 8) stw r3, (r0, 8) ldw r3, (r1, 12) addi r1, 16 stw r3, (r0, 12) addi r0, 16 #else ldw r20, (r1, 0) ldw r21, (r1, 4) ldw r22, (r1, 8) ldw r23, (r1, 12) stw r20, (r0, 0) stw r21, (r0, 4) stw r22, (r0, 8) stw r23, (r0, 12) PRE_BNEZAD (r18) addi r1, 16 addi r0, 16 #endif BNEZAD (r18, .L_len_larger_16bytes) .L_len_less_16bytes: zext r18, r2, 3, 2 bez r18, .L_copy_by_byte .L_len_less_16bytes_loop: ldw r3, (r1, 0) PRE_BNEZAD (r18) addi r1, 4 stw r3, (r0, 0) addi r0, 4 BNEZAD (r18, .L_len_less_16bytes_loop) /* Test if len less than 4 bytes. */ .L_copy_by_byte: zext r18, r2, 1, 0 bez r18, .L_return .L_copy_by_byte_loop: ldb r3, (r1, 0) PRE_BNEZAD (r18) addi r1, 1 stb r3, (r0, 0) addi r0, 1 BNEZAD (r18, .L_copy_by_byte_loop) .L_return: mov r0, r12 rts /* * If dest is not aligned, just copying some bytes makes the * dest align. */ .L_dest_not_aligned: sub r13, r19, r13 sub r2, r13 /* Makes the dest align. */ .L_dest_not_aligned_loop: ldb r3, (r1, 0) PRE_BNEZAD (r13) addi r1, 1 stb r3, (r0, 0) addi r0, 1 BNEZAD (r13, .L_dest_not_aligned_loop) cmplti r2, 4 bt .L_copy_by_byte /* Check whether the src is aligned. */ jbr .L_dest_aligned ENDPROC(__memcpy)
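The copy strategy is: byte-copy until the destination is word aligned, then move 16 bytes per iteration, then a word tail, then a byte tail, relying on the hardware tolerating unaligned word loads from the source. A portable C sketch of the same shape, with memcpy(..., 4/16) standing in for the ldw/stw groups:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *memcpy_sketch(void *dst, const void *src, size_t len)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        /* Head: copy bytes until the destination is 4-byte aligned. */
        while (len >= 4 && ((uintptr_t)d & 3)) {
                *d++ = *s++;
                len--;
        }

        /* Main loop: 16 bytes per pass (the four ldw/stw pairs). */
        while (len >= 16) {
                memcpy(d, s, 16);
                d += 16;
                s += 16;
                len -= 16;
        }

        /* Word tail. */
        while (len >= 4) {
                memcpy(d, s, 4);
                d += 4;
                s += 4;
                len -= 4;
        }

        /* Byte tail. */
        while (len--)
                *d++ = *s++;

        return dst;
}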
aixcc-public/challenge-001-exemplar-source
1,498
arch/csky/abiv2/strcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/linkage.h> #include "sysdep.h" ENTRY(strcpy) mov a3, a0 /* Check if the src addr is aligned. */ andi t0, a1, 3 bnez t0, 11f 1: /* Check if all the bytes in the word are not zero. */ ldw a2, (a1) tstnbz a2 bf 9f stw a2, (a3) ldw a2, (a1, 4) tstnbz a2 bf 2f stw a2, (a3, 4) ldw a2, (a1, 8) tstnbz a2 bf 3f stw a2, (a3, 8) ldw a2, (a1, 12) tstnbz a2 bf 4f stw a2, (a3, 12) ldw a2, (a1, 16) tstnbz a2 bf 5f stw a2, (a3, 16) ldw a2, (a1, 20) tstnbz a2 bf 6f stw a2, (a3, 20) ldw a2, (a1, 24) tstnbz a2 bf 7f stw a2, (a3, 24) ldw a2, (a1, 28) tstnbz a2 bf 8f stw a2, (a3, 28) addi a3, 32 addi a1, 32 br 1b 2: addi a3, 4 br 9f 3: addi a3, 8 br 9f 4: addi a3, 12 br 9f 5: addi a3, 16 br 9f 6: addi a3, 20 br 9f 7: addi a3, 24 br 9f 8: addi a3, 28 9: # ifdef __CSKYBE__ xtrb0 t0, a2 st.b t0, (a3) bez t0, 10f xtrb1 t0, a2 st.b t0, (a3, 1) bez t0, 10f xtrb2 t0, a2 st.b t0, (a3, 2) bez t0, 10f stw a2, (a3) # else xtrb3 t0, a2 st.b t0, (a3) bez t0, 10f xtrb2 t0, a2 st.b t0, (a3, 1) bez t0, 10f xtrb1 t0, a2 st.b t0, (a3, 2) bez t0, 10f stw a2, (a3) # endif /* !__CSKYBE__ */ 10: jmp lr 11: subi t0, 4 12: ld.b a2, (a1) st.b a2, (a3) bez a2, 10b addi t0, 1 addi a1, a1, 1 addi a3, a3, 1 bnez t0, 12b jbr 1b ENDPROC(strcpy)
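The unrolled loop leans on the csky tstnbz instruction, which tests that no byte of a word is zero. A portable C sketch of the same idea uses the classic exact has-zero-byte bit trick; the memcpy() calls stand in for the word loads and stores:

#include <stdint.h>
#include <string.h>

static int word_has_zero_byte(uint32_t v)
{
        /* True if any of the four bytes of v is 0x00 (exact test). */
        return ((v - 0x01010101u) & ~v & 0x80808080u) != 0;
}

static char *strcpy_sketch(char *dst, const char *src)
{
        char *d = dst;

        /* Byte loop until the source is word aligned, as in the assembly. */
        while ((uintptr_t)src & 3) {
                if ((*d++ = *src++) == '\0')
                        return dst;
        }

        for (;;) {
                uint32_t w;

                memcpy(&w, src, 4);
                if (word_has_zero_byte(w))
                        break;          /* finish byte by byte, like label 9: */
                memcpy(d, &w, 4);
                d += 4;
                src += 4;
        }

        while ((*d++ = *src++) != '\0')
                ;
        return dst;
}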
aixcc-public/challenge-001-exemplar-source
2,017
arch/csky/abiv2/memmove.S
/* SPDX-License-Identifier: GPL-2.0 */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/linkage.h> #include "sysdep.h" .weak memmove ENTRY(__memmove) ENTRY(memmove) subu r3, r0, r1 cmphs r3, r2 bt memcpy mov r12, r0 addu r0, r0, r2 addu r1, r1, r2 /* Test if len less than 4 bytes. */ cmplti r2, 4 bt .L_copy_by_byte andi r13, r0, 3 /* Test if dest is not 4 bytes aligned. */ bnez r13, .L_dest_not_aligned /* Hardware can handle unaligned access directly. */ .L_dest_aligned: /* If dest is aligned, then copy. */ zext r18, r2, 31, 4 /* Test if len less than 16 bytes. */ bez r18, .L_len_less_16bytes movi r19, 0 /* len > 16 bytes */ LABLE_ALIGN .L_len_larger_16bytes: subi r1, 16 subi r0, 16 #if defined(__CK860__) ldw r3, (r1, 12) stw r3, (r0, 12) ldw r3, (r1, 8) stw r3, (r0, 8) ldw r3, (r1, 4) stw r3, (r0, 4) ldw r3, (r1, 0) stw r3, (r0, 0) #else ldw r20, (r1, 0) ldw r21, (r1, 4) ldw r22, (r1, 8) ldw r23, (r1, 12) stw r20, (r0, 0) stw r21, (r0, 4) stw r22, (r0, 8) stw r23, (r0, 12) PRE_BNEZAD (r18) #endif BNEZAD (r18, .L_len_larger_16bytes) .L_len_less_16bytes: zext r18, r2, 3, 2 bez r18, .L_copy_by_byte .L_len_less_16bytes_loop: subi r1, 4 subi r0, 4 ldw r3, (r1, 0) PRE_BNEZAD (r18) stw r3, (r0, 0) BNEZAD (r18, .L_len_less_16bytes_loop) /* Test if len less than 4 bytes. */ .L_copy_by_byte: zext r18, r2, 1, 0 bez r18, .L_return .L_copy_by_byte_loop: subi r1, 1 subi r0, 1 ldb r3, (r1, 0) PRE_BNEZAD (r18) stb r3, (r0, 0) BNEZAD (r18, .L_copy_by_byte_loop) .L_return: mov r0, r12 rts /* If dest is not aligned, just copy some bytes makes the dest align. */ .L_dest_not_aligned: sub r2, r13 .L_dest_not_aligned_loop: subi r1, 1 subi r0, 1 /* Makes the dest align. */ ldb r3, (r1, 0) PRE_BNEZAD (r13) stb r3, (r0, 0) BNEZAD (r13, .L_dest_not_aligned_loop) cmplti r2, 4 bt .L_copy_by_byte /* Check whether the src is aligned. */ jbr .L_dest_aligned ENDPROC(memmove) ENDPROC(__memmove)
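The first three instructions implement the overlap test: with unsigned arithmetic, (dst - src) >= len exactly when a forward copy can never overwrite bytes it still has to read, so the routine can branch straight to memcpy; otherwise it copies backwards from the end. A C sketch of that decision:

#include <stddef.h>
#include <stdint.h>

static void *memmove_sketch(void *dst, const void *src, size_t len)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        if ((uintptr_t)d - (uintptr_t)s >= len) {
                /* No harmful overlap: forward copy (the memcpy path). */
                for (size_t i = 0; i < len; i++)
                        d[i] = s[i];
        } else {
                /* dst sits inside [src, src+len): copy backwards. */
                for (size_t i = len; i-- > 0; )
                        d[i] = s[i];
        }
        return dst;
}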
aixcc-public/challenge-001-exemplar-source
1,705
arch/csky/abiv2/memset.S
/* SPDX-License-Identifier: GPL-2.0 */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/linkage.h> #include "sysdep.h" .weak memset ENTRY(__memset) ENTRY(memset) /* Test if len less than 4 bytes. */ mov r12, r0 cmplti r2, 8 bt .L_set_by_byte andi r13, r0, 3 movi r19, 4 /* Test if dest is not 4 bytes aligned. */ bnez r13, .L_dest_not_aligned /* Hardware can handle unaligned access directly. */ .L_dest_aligned: zextb r3, r1 lsli r1, 8 or r1, r3 lsli r3, r1, 16 or r3, r1 /* If dest is aligned, then copy. */ zext r18, r2, 31, 4 /* Test if len less than 16 bytes. */ bez r18, .L_len_less_16bytes LABLE_ALIGN .L_len_larger_16bytes: stw r3, (r0, 0) stw r3, (r0, 4) stw r3, (r0, 8) stw r3, (r0, 12) PRE_BNEZAD (r18) addi r0, 16 BNEZAD (r18, .L_len_larger_16bytes) .L_len_less_16bytes: zext r18, r2, 3, 2 andi r2, 3 bez r18, .L_set_by_byte .L_len_less_16bytes_loop: stw r3, (r0, 0) PRE_BNEZAD (r18) addi r0, 4 BNEZAD (r18, .L_len_less_16bytes_loop) /* Test if len less than 4 bytes. */ .L_set_by_byte: zext r18, r2, 2, 0 bez r18, .L_return .L_set_by_byte_loop: stb r1, (r0, 0) PRE_BNEZAD (r18) addi r0, 1 BNEZAD (r18, .L_set_by_byte_loop) .L_return: mov r0, r12 rts /* If dest is not aligned, just set some bytes makes the dest align. */ .L_dest_not_aligned: sub r13, r19, r13 sub r2, r13 .L_dest_not_aligned_loop: /* Makes the dest align. */ stb r1, (r0, 0) PRE_BNEZAD (r13) addi r0, 1 BNEZAD (r13, .L_dest_not_aligned_loop) cmplti r2, 8 bt .L_set_by_byte /* Check whether the src is aligned. */ jbr .L_dest_aligned ENDPROC(memset) ENDPROC(__memset)
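The fill value is replicated into a 32-bit word by the zextb/lsli/or sequence, the destination is aligned, and the main loop then stores 16 bytes per pass. A portable C sketch of the same approach, with memcpy() standing in for the word stores (the assembly only bothers aligning when at least 8 bytes remain; the sketch glosses over that threshold):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *memset_sketch(void *dst, int c, size_t len)
{
        unsigned char *d = dst;
        uint32_t w = (uint8_t)c;

        w |= w << 8;            /* zextb + first lsli/or  */
        w |= w << 16;           /* second lsli/or         */

        while (len && ((uintptr_t)d & 3)) {     /* align the destination */
                *d++ = (unsigned char)c;
                len--;
        }

        while (len >= 16) {                     /* four stw per pass */
                memcpy(d +  0, &w, 4);
                memcpy(d +  4, &w, 4);
                memcpy(d +  8, &w, 4);
                memcpy(d + 12, &w, 4);
                d += 16;
                len -= 16;
        }

        while (len >= 4) {                      /* word tail */
                memcpy(d, &w, 4);
                d += 4;
                len -= 4;
        }

        while (len--)                           /* byte tail */
                *d++ = (unsigned char)c;

        return dst;
}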
aixcc-public/challenge-001-exemplar-source
3,565
arch/csky/abiv2/mcount.S
/* SPDX-License-Identifier: GPL-2.0 */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/linkage.h> #include <asm/ftrace.h> #include <abi/entry.h> #include <asm/asm-offsets.h> /* * csky-gcc with -pg will put the following asm after prologue: * push r15 * jsri _mcount * * stack layout after mcount_enter in _mcount(): * * current sp => 0:+-------+ * | a0-a3 | -> must save all argument regs * +16:+-------+ * | lr | -> _mcount lr (instrumente function's pc) * +20:+-------+ * | fp=r8 | -> instrumented function fp * +24:+-------+ * | plr | -> instrumented function lr (parent's pc) * +-------+ */ .macro mcount_enter subi sp, 24 stw a0, (sp, 0) stw a1, (sp, 4) stw a2, (sp, 8) stw a3, (sp, 12) stw lr, (sp, 16) stw r8, (sp, 20) .endm .macro mcount_exit ldw a0, (sp, 0) ldw a1, (sp, 4) ldw a2, (sp, 8) ldw a3, (sp, 12) ldw t1, (sp, 16) ldw r8, (sp, 20) ldw lr, (sp, 24) addi sp, 28 jmp t1 .endm .macro mcount_enter_regs subi sp, 8 stw lr, (sp, 0) stw r8, (sp, 4) SAVE_REGS_FTRACE .endm .macro mcount_exit_regs RESTORE_REGS_FTRACE subi sp, 152 ldw t1, (sp, 4) addi sp, 152 ldw r8, (sp, 4) ldw lr, (sp, 8) addi sp, 12 jmp t1 .endm .macro save_return_regs subi sp, 16 stw a0, (sp, 0) stw a1, (sp, 4) stw a2, (sp, 8) stw a3, (sp, 12) .endm .macro restore_return_regs mov lr, a0 ldw a0, (sp, 0) ldw a1, (sp, 4) ldw a2, (sp, 8) ldw a3, (sp, 12) addi sp, 16 .endm .macro nop32_stub nop32 nop32 nop32 .endm ENTRY(ftrace_stub) jmp lr END(ftrace_stub) #ifndef CONFIG_DYNAMIC_FTRACE ENTRY(_mcount) mcount_enter /* r26 is link register, only used with jsri translation */ lrw r26, ftrace_trace_function ldw r26, (r26, 0) lrw a1, ftrace_stub cmpne r26, a1 bf skip_ftrace mov a0, lr subi a0, 4 ldw a1, (sp, 24) lrw a2, function_trace_op ldw a2, (a2, 0) jsr r26 #ifndef CONFIG_FUNCTION_GRAPH_TRACER skip_ftrace: mcount_exit #else skip_ftrace: lrw a0, ftrace_graph_return ldw a0, (a0, 0) lrw a1, ftrace_stub cmpne a0, a1 bt ftrace_graph_caller lrw a0, ftrace_graph_entry ldw a0, (a0, 0) lrw a1, ftrace_graph_entry_stub cmpne a0, a1 bt ftrace_graph_caller mcount_exit #endif END(_mcount) #else /* CONFIG_DYNAMIC_FTRACE */ ENTRY(_mcount) mov t1, lr ldw lr, (sp, 0) addi sp, 4 jmp t1 ENDPROC(_mcount) ENTRY(ftrace_caller) mcount_enter ldw a0, (sp, 16) subi a0, 4 ldw a1, (sp, 24) lrw a2, function_trace_op ldw a2, (a2, 0) nop GLOBAL(ftrace_call) nop32_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER nop GLOBAL(ftrace_graph_call) nop32_stub #endif mcount_exit ENDPROC(ftrace_caller) #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) mov a0, sp addi a0, 24 ldw a1, (sp, 16) subi a1, 4 mov a2, r8 lrw r26, prepare_ftrace_return jsr r26 mcount_exit END(ftrace_graph_caller) ENTRY(return_to_handler) save_return_regs mov a0, r8 jsri ftrace_return_to_handler restore_return_regs jmp lr END(return_to_handler) #endif #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS ENTRY(ftrace_regs_caller) mcount_enter_regs lrw t1, PT_FRAME_SIZE add t1, sp ldw a0, (t1, 0) subi a0, 4 ldw a1, (t1, 8) lrw a2, function_trace_op ldw a2, (a2, 0) mov a3, sp nop GLOBAL(ftrace_regs_call) nop32_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER nop GLOBAL(ftrace_graph_regs_call) nop32_stub #endif mcount_exit_regs ENDPROC(ftrace_regs_caller) #endif /* CONFIG_DYNAMIC_FTRACE */
aixcc-public/challenge-001-exemplar-source
2,929
arch/csky/abiv2/memcmp.S
/* SPDX-License-Identifier: GPL-2.0 */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/linkage.h> #include "sysdep.h" ENTRY(memcmp) /* Test if len less than 4 bytes. */ mov r3, r0 movi r0, 0 mov r12, r4 cmplti r2, 4 bt .L_compare_by_byte andi r13, r0, 3 movi r19, 4 /* Test if s1 is not 4 bytes aligned. */ bnez r13, .L_s1_not_aligned LABLE_ALIGN .L_s1_aligned: /* If dest is aligned, then copy. */ zext r18, r2, 31, 4 /* Test if len less than 16 bytes. */ bez r18, .L_compare_by_word .L_compare_by_4word: /* If aligned, load word each time. */ ldw r20, (r3, 0) ldw r21, (r1, 0) /* If s1[i] != s2[i], goto .L_byte_check. */ cmpne r20, r21 bt .L_byte_check ldw r20, (r3, 4) ldw r21, (r1, 4) cmpne r20, r21 bt .L_byte_check ldw r20, (r3, 8) ldw r21, (r1, 8) cmpne r20, r21 bt .L_byte_check ldw r20, (r3, 12) ldw r21, (r1, 12) cmpne r20, r21 bt .L_byte_check PRE_BNEZAD (r18) addi a3, 16 addi a1, 16 BNEZAD (r18, .L_compare_by_4word) .L_compare_by_word: zext r18, r2, 3, 2 bez r18, .L_compare_by_byte .L_compare_by_word_loop: ldw r20, (r3, 0) ldw r21, (r1, 0) addi r3, 4 PRE_BNEZAD (r18) cmpne r20, r21 addi r1, 4 bt .L_byte_check BNEZAD (r18, .L_compare_by_word_loop) .L_compare_by_byte: zext r18, r2, 1, 0 bez r18, .L_return .L_compare_by_byte_loop: ldb r0, (r3, 0) ldb r4, (r1, 0) addi r3, 1 subu r0, r4 PRE_BNEZAD (r18) addi r1, 1 bnez r0, .L_return BNEZAD (r18, .L_compare_by_byte_loop) .L_return: mov r4, r12 rts # ifdef __CSKYBE__ /* d[i] != s[i] in word, so we check byte 0. */ .L_byte_check: xtrb0 r0, r20 xtrb0 r2, r21 subu r0, r2 bnez r0, .L_return /* check byte 1 */ xtrb1 r0, r20 xtrb1 r2, r21 subu r0, r2 bnez r0, .L_return /* check byte 2 */ xtrb2 r0, r20 xtrb2 r2, r21 subu r0, r2 bnez r0, .L_return /* check byte 3 */ xtrb3 r0, r20 xtrb3 r2, r21 subu r0, r2 # else /* s1[i] != s2[i] in word, so we check byte 3. */ .L_byte_check: xtrb3 r0, r20 xtrb3 r2, r21 subu r0, r2 bnez r0, .L_return /* check byte 2 */ xtrb2 r0, r20 xtrb2 r2, r21 subu r0, r2 bnez r0, .L_return /* check byte 1 */ xtrb1 r0, r20 xtrb1 r2, r21 subu r0, r2 bnez r0, .L_return /* check byte 0 */ xtrb0 r0, r20 xtrb0 r2, r21 subu r0, r2 br .L_return # endif /* !__CSKYBE__ */ /* Compare when s1 is not aligned. */ .L_s1_not_aligned: sub r13, r19, r13 sub r2, r13 .L_s1_not_aligned_loop: ldb r0, (r3, 0) ldb r4, (r1, 0) addi r3, 1 subu r0, r4 PRE_BNEZAD (r13) addi r1, 1 bnez r0, .L_return BNEZAD (r13, .L_s1_not_aligned_loop) br .L_s1_aligned ENDPROC(memcmp)
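The routine compares a word at a time and, on the first mismatching word, re-examines it byte by byte in memory order so the sign of the result matches a plain byte-wise memcmp on either endianness; that is what the __CSKYBE__ branches of .L_byte_check arrange. A C sketch of that structure:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int memcmp_sketch(const void *p1, const void *p2, size_t len)
{
        const unsigned char *s1 = p1, *s2 = p2;

        /* Word loop: stop at the first word that differs. */
        while (len >= 4) {
                uint32_t a, b;

                memcpy(&a, s1, 4);
                memcpy(&b, s2, 4);
                if (a != b)
                        break;
                s1 += 4;
                s2 += 4;
                len -= 4;
        }

        /* Byte check / byte tail: first differing byte in memory order. */
        while (len--) {
                int diff = *s1 - *s2;

                if (diff)
                        return diff;
                s1++;
                s2++;
        }
        return 0;
}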
aixcc-public/challenge-001-exemplar-source
1,481
arch/csky/abiv2/strlen.S
/* SPDX-License-Identifier: GPL-2.0 */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/linkage.h> #include "sysdep.h" ENTRY(strlen) /* Check if the start addr is aligned. */ mov r3, r0 andi r1, r0, 3 movi r2, 4 movi r0, 0 bnez r1, .L_start_not_aligned LABLE_ALIGN .L_start_addr_aligned: /* Check if all the bytes in the word are not zero. */ ldw r1, (r3) tstnbz r1 bf .L_string_tail ldw r1, (r3, 4) addi r0, 4 tstnbz r1 bf .L_string_tail ldw r1, (r3, 8) addi r0, 4 tstnbz r1 bf .L_string_tail ldw r1, (r3, 12) addi r0, 4 tstnbz r1 bf .L_string_tail ldw r1, (r3, 16) addi r0, 4 tstnbz r1 bf .L_string_tail ldw r1, (r3, 20) addi r0, 4 tstnbz r1 bf .L_string_tail ldw r1, (r3, 24) addi r0, 4 tstnbz r1 bf .L_string_tail ldw r1, (r3, 28) addi r0, 4 tstnbz r1 bf .L_string_tail addi r0, 4 addi r3, 32 br .L_start_addr_aligned .L_string_tail: # ifdef __CSKYBE__ xtrb0 r3, r1 bez r3, .L_return addi r0, 1 xtrb1 r3, r1 bez r3, .L_return addi r0, 1 xtrb2 r3, r1 bez r3, .L_return addi r0, 1 # else xtrb3 r3, r1 bez r3, .L_return addi r0, 1 xtrb2 r3, r1 bez r3, .L_return addi r0, 1 xtrb1 r3, r1 bez r3, .L_return addi r0, 1 # endif /* !__CSKYBE__ */ .L_return: rts .L_start_not_aligned: sub r2, r2, r1 .L_start_not_aligned_loop: ldb r1, (r3) PRE_BNEZAD (r2) addi r3, 1 bez r1, .L_return addi r0, 1 BNEZAD (r2, .L_start_not_aligned_loop) br .L_start_addr_aligned ENDPROC(strlen)
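Same word-at-a-time pattern as strcpy above: scan aligned words until one contains a zero byte (the tstnbz test), then finish byte by byte. A portable C sketch using the exact has-zero-byte trick:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t strlen_sketch(const char *s)
{
        const char *p = s;

        /* Byte loop until the pointer is word aligned. */
        while ((uintptr_t)p & 3) {
                if (*p == '\0')
                        return (size_t)(p - s);
                p++;
        }

        /* Word loop: stop at the first word containing a zero byte. */
        for (;;) {
                uint32_t w;

                memcpy(&w, p, 4);
                if ((w - 0x01010101u) & ~w & 0x80808080u)
                        break;
                p += 4;
        }

        /* Locate the NUL inside the final word. */
        while (*p)
                p++;
        return (size_t)(p - s);
}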
aixcc-public/challenge-001-exemplar-source
2,334
arch/csky/abiv2/strcmp.S
/* SPDX-License-Identifier: GPL-2.0 */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/linkage.h> #include "sysdep.h" ENTRY(strcmp) mov a3, a0 /* Check if the s1 addr is aligned. */ xor a2, a3, a1 andi a2, 0x3 bnez a2, 7f andi t1, a0, 0x3 bnez t1, 5f 1: /* If aligned, load word each time. */ ldw t0, (a3, 0) ldw t1, (a1, 0) /* If s1[i] != s2[i], goto 2f. */ cmpne t0, t1 bt 2f /* If s1[i] == s2[i], check if s1 or s2 is at the end. */ tstnbz t0 /* If at the end, goto 3f (finish comparing). */ bf 3f ldw t0, (a3, 4) ldw t1, (a1, 4) cmpne t0, t1 bt 2f tstnbz t0 bf 3f ldw t0, (a3, 8) ldw t1, (a1, 8) cmpne t0, t1 bt 2f tstnbz t0 bf 3f ldw t0, (a3, 12) ldw t1, (a1, 12) cmpne t0, t1 bt 2f tstnbz t0 bf 3f ldw t0, (a3, 16) ldw t1, (a1, 16) cmpne t0, t1 bt 2f tstnbz t0 bf 3f ldw t0, (a3, 20) ldw t1, (a1, 20) cmpne t0, t1 bt 2f tstnbz t0 bf 3f ldw t0, (a3, 24) ldw t1, (a1, 24) cmpne t0, t1 bt 2f tstnbz t0 bf 3f ldw t0, (a3, 28) ldw t1, (a1, 28) cmpne t0, t1 bt 2f tstnbz t0 bf 3f addi a3, 32 addi a1, 32 br 1b # ifdef __CSKYBE__ /* d[i] != s[i] in word, so we check byte 0. */ 2: xtrb0 a0, t0 xtrb0 a2, t1 subu a0, a2 bez a2, 4f bnez a0, 4f /* check byte 1 */ xtrb1 a0, t0 xtrb1 a2, t1 subu a0, a2 bez a2, 4f bnez a0, 4f /* check byte 2 */ xtrb2 a0, t0 xtrb2 a2, t1 subu a0, a2 bez a2, 4f bnez a0, 4f /* check byte 3 */ xtrb3 a0, t0 xtrb3 a2, t1 subu a0, a2 # else /* s1[i] != s2[i] in word, so we check byte 3. */ 2: xtrb3 a0, t0 xtrb3 a2, t1 subu a0, a2 bez a2, 4f bnez a0, 4f /* check byte 2 */ xtrb2 a0, t0 xtrb2 a2, t1 subu a0, a2 bez a2, 4f bnez a0, 4f /* check byte 1 */ xtrb1 a0, t0 xtrb1 a2, t1 subu a0, a2 bez a2, 4f bnez a0, 4f /* check byte 0 */ xtrb0 a0, t0 xtrb0 a2, t1 subu a0, a2 # endif /* !__CSKYBE__ */ jmp lr 3: movi a0, 0 4: jmp lr /* Compare when s1 or s2 is not aligned. */ 5: subi t1, 4 6: ldb a0, (a3, 0) ldb a2, (a1, 0) subu a0, a2 bez a2, 4b bnez a0, 4b addi t1, 1 addi a1, 1 addi a3, 1 bnez t1, 6b br 1b 7: ldb a0, (a3, 0) addi a3, 1 ldb a2, (a1, 0) addi a1, 1 subu a0, a2 bnez a0, 4b bnez a2, 7b jmp r15 ENDPROC(strcmp)
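The xor/andi pair at the top is the gate for the word-at-a-time path: reading whole words is only safe once both strings are word aligned, since an aligned load cannot run past the terminating NUL into an unmapped page, and if the two pointers are misaligned by different amounts they can never both become aligned, so the code stays on the byte loop at label 7. The test itself, in C:

#include <stdint.h>

static int use_word_loop(const char *s1, const char *s2)
{
        /* True when s1 and s2 share the same offset within a word
         * (xor a2, a3, a1; andi a2, 0x3; bnez a2, 7f above). */
        return (((uintptr_t)s1 ^ (uintptr_t)s2) & 3) == 0;
}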
aixcc-public/challenge-001-exemplar-source
1,156
arch/csky/kernel/vdso/vdso.lds.S
/* SPDX-License-Identifier: GPL-2.0-only */ #include <asm/page.h> OUTPUT_ARCH(csky) SECTIONS { PROVIDE(_vdso_data = . + PAGE_SIZE); . = SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note .dynamic : { *(.dynamic) } :text :dynamic .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } . = 0x800; .text : { *(.text .text.*) } :text .data : { *(.got.plt) *(.got) *(.data .data.* .gnu.linkonce.d.*) *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) } } PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } VERSION { LINUX_5.10 { global: __vdso_rt_sigreturn; __vdso_clock_gettime; __vdso_clock_gettime64; __vdso_gettimeofday; __vdso_clock_getres; local: *; }; }
aixcc-public/challenge-001-exemplar-source
2,175
arch/alpha/kernel/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/kernel/head.S * * initial boot stuff.. At this point, the bootloader has already * switched into OSF/1 PAL-code, and loaded us at the correct address * (START_ADDR). So there isn't much left for us to do: just set up * the kernel global pointer and jump to the kernel entry-point. */ #include <linux/init.h> #include <asm/asm-offsets.h> #include <asm/pal.h> #include <asm/setup.h> __HEAD .globl _stext .set noreorder .globl __start .ent __start _stext: __start: .prologue 0 br $27,1f 1: ldgp $29,0($27) /* We need to get current_task_info loaded up... */ lda $8,init_thread_union /* ... and find our stack ... */ lda $30,0x4000 - SIZEOF_PT_REGS($8) /* ... and then we can start the kernel. */ jsr $26,start_kernel call_pal PAL_halt .end __start #ifdef CONFIG_SMP .align 3 .globl __smp_callin .ent __smp_callin /* On entry here from SRM console, the HWPCB of the per-cpu slot for this processor has been loaded. We've arranged for the UNIQUE value for this process to contain the PCBB of the target idle task. */ __smp_callin: .prologue 1 ldgp $29,0($27) # First order of business, load the GP. call_pal PAL_rduniq # Grab the target PCBB. mov $0,$16 # Install it. call_pal PAL_swpctx lda $8,0x3fff # Find "current". bic $30,$8,$8 jsr $26,smp_callin call_pal PAL_halt .end __smp_callin #endif /* CONFIG_SMP */ # # The following two functions are needed for supporting SRM PALcode # on the PC164 (at least), since that PALcode manages the interrupt # masking, and we cannot duplicate the effort without causing problems # .align 3 .globl cserve_ena .ent cserve_ena cserve_ena: .prologue 0 bis $16,$16,$17 lda $16,52($31) call_pal PAL_cserve ret ($26) .end cserve_ena .align 3 .globl cserve_dis .ent cserve_dis cserve_dis: .prologue 0 bis $16,$16,$17 lda $16,53($31) call_pal PAL_cserve ret ($26) .end cserve_dis # # It is handy, on occasion, to make halt actually just loop. # Putting it here means we dont have to recompile the whole # kernel. # .align 3 .globl halt .ent halt halt: .prologue 0 call_pal PAL_halt .end halt
aixcc-public/challenge-001-exemplar-source
17,981
arch/alpha/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/kernel/entry.S * * Kernel entry-points. */ #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/pal.h> #include <asm/errno.h> #include <asm/unistd.h> .text .set noat .cfi_sections .debug_frame /* Stack offsets. */ #define SP_OFF 184 #define SWITCH_STACK_SIZE 320 .macro CFI_START_OSF_FRAME func .align 4 .globl \func .type \func,@function \func: .cfi_startproc simple .cfi_return_column 64 .cfi_def_cfa $sp, 48 .cfi_rel_offset 64, 8 .cfi_rel_offset $gp, 16 .cfi_rel_offset $16, 24 .cfi_rel_offset $17, 32 .cfi_rel_offset $18, 40 .endm .macro CFI_END_OSF_FRAME func .cfi_endproc .size \func, . - \func .endm /* * This defines the normal kernel pt-regs layout. * * regs 9-15 preserved by C code * regs 16-18 saved by PAL-code * regs 29-30 saved and set up by PAL-code * JRP - Save regs 16-18 in a special area of the stack, so that * the palcode-provided values are available to the signal handler. */ .macro SAVE_ALL subq $sp, SP_OFF, $sp .cfi_adjust_cfa_offset SP_OFF stq $0, 0($sp) stq $1, 8($sp) stq $2, 16($sp) stq $3, 24($sp) stq $4, 32($sp) stq $28, 144($sp) .cfi_rel_offset $0, 0 .cfi_rel_offset $1, 8 .cfi_rel_offset $2, 16 .cfi_rel_offset $3, 24 .cfi_rel_offset $4, 32 .cfi_rel_offset $28, 144 lda $2, alpha_mv stq $5, 40($sp) stq $6, 48($sp) stq $7, 56($sp) stq $8, 64($sp) stq $19, 72($sp) stq $20, 80($sp) stq $21, 88($sp) ldq $2, HAE_CACHE($2) stq $22, 96($sp) stq $23, 104($sp) stq $24, 112($sp) stq $25, 120($sp) stq $26, 128($sp) stq $27, 136($sp) stq $2, 152($sp) stq $16, 160($sp) stq $17, 168($sp) stq $18, 176($sp) .cfi_rel_offset $5, 40 .cfi_rel_offset $6, 48 .cfi_rel_offset $7, 56 .cfi_rel_offset $8, 64 .cfi_rel_offset $19, 72 .cfi_rel_offset $20, 80 .cfi_rel_offset $21, 88 .cfi_rel_offset $22, 96 .cfi_rel_offset $23, 104 .cfi_rel_offset $24, 112 .cfi_rel_offset $25, 120 .cfi_rel_offset $26, 128 .cfi_rel_offset $27, 136 .endm .macro RESTORE_ALL lda $19, alpha_mv ldq $0, 0($sp) ldq $1, 8($sp) ldq $2, 16($sp) ldq $3, 24($sp) ldq $21, 152($sp) ldq $20, HAE_CACHE($19) ldq $4, 32($sp) ldq $5, 40($sp) ldq $6, 48($sp) ldq $7, 56($sp) subq $20, $21, $20 ldq $8, 64($sp) beq $20, 99f ldq $20, HAE_REG($19) stq $21, HAE_CACHE($19) stq $21, 0($20) 99: ldq $19, 72($sp) ldq $20, 80($sp) ldq $21, 88($sp) ldq $22, 96($sp) ldq $23, 104($sp) ldq $24, 112($sp) ldq $25, 120($sp) ldq $26, 128($sp) ldq $27, 136($sp) ldq $28, 144($sp) addq $sp, SP_OFF, $sp .cfi_restore $0 .cfi_restore $1 .cfi_restore $2 .cfi_restore $3 .cfi_restore $4 .cfi_restore $5 .cfi_restore $6 .cfi_restore $7 .cfi_restore $8 .cfi_restore $19 .cfi_restore $20 .cfi_restore $21 .cfi_restore $22 .cfi_restore $23 .cfi_restore $24 .cfi_restore $25 .cfi_restore $26 .cfi_restore $27 .cfi_restore $28 .cfi_adjust_cfa_offset -SP_OFF .endm .macro DO_SWITCH_STACK bsr $1, do_switch_stack .cfi_adjust_cfa_offset SWITCH_STACK_SIZE .cfi_rel_offset $9, 0 .cfi_rel_offset $10, 8 .cfi_rel_offset $11, 16 .cfi_rel_offset $12, 24 .cfi_rel_offset $13, 32 .cfi_rel_offset $14, 40 .cfi_rel_offset $15, 48 /* We don't really care about the FP registers for debugging. */ .endm .macro UNDO_SWITCH_STACK bsr $1, undo_switch_stack .cfi_restore $9 .cfi_restore $10 .cfi_restore $11 .cfi_restore $12 .cfi_restore $13 .cfi_restore $14 .cfi_restore $15 .cfi_adjust_cfa_offset -SWITCH_STACK_SIZE .endm /* * Non-syscall kernel entry points. 
*/ CFI_START_OSF_FRAME entInt SAVE_ALL lda $8, 0x3fff lda $26, ret_from_sys_call bic $sp, $8, $8 mov $sp, $19 jsr $31, do_entInt CFI_END_OSF_FRAME entInt CFI_START_OSF_FRAME entArith SAVE_ALL lda $8, 0x3fff lda $26, ret_from_sys_call bic $sp, $8, $8 mov $sp, $18 jsr $31, do_entArith CFI_END_OSF_FRAME entArith CFI_START_OSF_FRAME entMM SAVE_ALL /* save $9 - $15 so the inline exception code can manipulate them. */ subq $sp, 56, $sp .cfi_adjust_cfa_offset 56 stq $9, 0($sp) stq $10, 8($sp) stq $11, 16($sp) stq $12, 24($sp) stq $13, 32($sp) stq $14, 40($sp) stq $15, 48($sp) .cfi_rel_offset $9, 0 .cfi_rel_offset $10, 8 .cfi_rel_offset $11, 16 .cfi_rel_offset $12, 24 .cfi_rel_offset $13, 32 .cfi_rel_offset $14, 40 .cfi_rel_offset $15, 48 addq $sp, 56, $19 /* handle the fault */ lda $8, 0x3fff bic $sp, $8, $8 jsr $26, do_page_fault /* reload the registers after the exception code played. */ ldq $9, 0($sp) ldq $10, 8($sp) ldq $11, 16($sp) ldq $12, 24($sp) ldq $13, 32($sp) ldq $14, 40($sp) ldq $15, 48($sp) addq $sp, 56, $sp .cfi_restore $9 .cfi_restore $10 .cfi_restore $11 .cfi_restore $12 .cfi_restore $13 .cfi_restore $14 .cfi_restore $15 .cfi_adjust_cfa_offset -56 /* finish up the syscall as normal. */ br ret_from_sys_call CFI_END_OSF_FRAME entMM CFI_START_OSF_FRAME entIF SAVE_ALL lda $8, 0x3fff lda $26, ret_from_sys_call bic $sp, $8, $8 mov $sp, $17 jsr $31, do_entIF CFI_END_OSF_FRAME entIF CFI_START_OSF_FRAME entUna lda $sp, -256($sp) .cfi_adjust_cfa_offset 256 stq $0, 0($sp) .cfi_rel_offset $0, 0 .cfi_remember_state ldq $0, 256($sp) /* get PS */ stq $1, 8($sp) stq $2, 16($sp) stq $3, 24($sp) and $0, 8, $0 /* user mode? */ stq $4, 32($sp) bne $0, entUnaUser /* yup -> do user-level unaligned fault */ stq $5, 40($sp) stq $6, 48($sp) stq $7, 56($sp) stq $8, 64($sp) stq $9, 72($sp) stq $10, 80($sp) stq $11, 88($sp) stq $12, 96($sp) stq $13, 104($sp) stq $14, 112($sp) stq $15, 120($sp) /* 16-18 PAL-saved */ stq $19, 152($sp) stq $20, 160($sp) stq $21, 168($sp) stq $22, 176($sp) stq $23, 184($sp) stq $24, 192($sp) stq $25, 200($sp) stq $26, 208($sp) stq $27, 216($sp) stq $28, 224($sp) mov $sp, $19 stq $gp, 232($sp) .cfi_rel_offset $1, 1*8 .cfi_rel_offset $2, 2*8 .cfi_rel_offset $3, 3*8 .cfi_rel_offset $4, 4*8 .cfi_rel_offset $5, 5*8 .cfi_rel_offset $6, 6*8 .cfi_rel_offset $7, 7*8 .cfi_rel_offset $8, 8*8 .cfi_rel_offset $9, 9*8 .cfi_rel_offset $10, 10*8 .cfi_rel_offset $11, 11*8 .cfi_rel_offset $12, 12*8 .cfi_rel_offset $13, 13*8 .cfi_rel_offset $14, 14*8 .cfi_rel_offset $15, 15*8 .cfi_rel_offset $19, 19*8 .cfi_rel_offset $20, 20*8 .cfi_rel_offset $21, 21*8 .cfi_rel_offset $22, 22*8 .cfi_rel_offset $23, 23*8 .cfi_rel_offset $24, 24*8 .cfi_rel_offset $25, 25*8 .cfi_rel_offset $26, 26*8 .cfi_rel_offset $27, 27*8 .cfi_rel_offset $28, 28*8 .cfi_rel_offset $29, 29*8 lda $8, 0x3fff stq $31, 248($sp) bic $sp, $8, $8 jsr $26, do_entUna ldq $0, 0($sp) ldq $1, 8($sp) ldq $2, 16($sp) ldq $3, 24($sp) ldq $4, 32($sp) ldq $5, 40($sp) ldq $6, 48($sp) ldq $7, 56($sp) ldq $8, 64($sp) ldq $9, 72($sp) ldq $10, 80($sp) ldq $11, 88($sp) ldq $12, 96($sp) ldq $13, 104($sp) ldq $14, 112($sp) ldq $15, 120($sp) /* 16-18 PAL-saved */ ldq $19, 152($sp) ldq $20, 160($sp) ldq $21, 168($sp) ldq $22, 176($sp) ldq $23, 184($sp) ldq $24, 192($sp) ldq $25, 200($sp) ldq $26, 208($sp) ldq $27, 216($sp) ldq $28, 224($sp) ldq $gp, 232($sp) lda $sp, 256($sp) .cfi_restore $1 .cfi_restore $2 .cfi_restore $3 .cfi_restore $4 .cfi_restore $5 .cfi_restore $6 .cfi_restore $7 .cfi_restore $8 .cfi_restore $9 .cfi_restore $10 .cfi_restore $11 
.cfi_restore $12 .cfi_restore $13 .cfi_restore $14 .cfi_restore $15 .cfi_restore $19 .cfi_restore $20 .cfi_restore $21 .cfi_restore $22 .cfi_restore $23 .cfi_restore $24 .cfi_restore $25 .cfi_restore $26 .cfi_restore $27 .cfi_restore $28 .cfi_restore $29 .cfi_adjust_cfa_offset -256 call_pal PAL_rti .align 4 entUnaUser: .cfi_restore_state ldq $0, 0($sp) /* restore original $0 */ lda $sp, 256($sp) /* pop entUna's stack frame */ .cfi_restore $0 .cfi_adjust_cfa_offset -256 SAVE_ALL /* setup normal kernel stack */ lda $sp, -56($sp) .cfi_adjust_cfa_offset 56 stq $9, 0($sp) stq $10, 8($sp) stq $11, 16($sp) stq $12, 24($sp) stq $13, 32($sp) stq $14, 40($sp) stq $15, 48($sp) .cfi_rel_offset $9, 0 .cfi_rel_offset $10, 8 .cfi_rel_offset $11, 16 .cfi_rel_offset $12, 24 .cfi_rel_offset $13, 32 .cfi_rel_offset $14, 40 .cfi_rel_offset $15, 48 lda $8, 0x3fff addq $sp, 56, $19 bic $sp, $8, $8 jsr $26, do_entUnaUser ldq $9, 0($sp) ldq $10, 8($sp) ldq $11, 16($sp) ldq $12, 24($sp) ldq $13, 32($sp) ldq $14, 40($sp) ldq $15, 48($sp) lda $sp, 56($sp) .cfi_restore $9 .cfi_restore $10 .cfi_restore $11 .cfi_restore $12 .cfi_restore $13 .cfi_restore $14 .cfi_restore $15 .cfi_adjust_cfa_offset -56 br ret_from_sys_call CFI_END_OSF_FRAME entUna CFI_START_OSF_FRAME entDbg SAVE_ALL lda $8, 0x3fff lda $26, ret_from_sys_call bic $sp, $8, $8 mov $sp, $16 jsr $31, do_entDbg CFI_END_OSF_FRAME entDbg /* * The system call entry point is special. Most importantly, it looks * like a function call to userspace as far as clobbered registers. We * do preserve the argument registers (for syscall restarts) and $26 * (for leaf syscall functions). * * So much for theory. We don't take advantage of this yet. * * Note that a0-a2 are not saved by PALcode as with the other entry points. */ .align 4 .globl entSys .type entSys, @function .cfi_startproc simple .cfi_return_column 64 .cfi_def_cfa $sp, 48 .cfi_rel_offset 64, 8 .cfi_rel_offset $gp, 16 entSys: SAVE_ALL lda $8, 0x3fff bic $sp, $8, $8 lda $4, NR_SYSCALLS($31) stq $16, SP_OFF+24($sp) lda $5, sys_call_table lda $27, sys_ni_syscall cmpult $0, $4, $4 ldl $3, TI_FLAGS($8) stq $17, SP_OFF+32($sp) s8addq $0, $5, $5 stq $18, SP_OFF+40($sp) .cfi_rel_offset $16, SP_OFF+24 .cfi_rel_offset $17, SP_OFF+32 .cfi_rel_offset $18, SP_OFF+40 #ifdef CONFIG_AUDITSYSCALL lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT and $3, $6, $3 bne $3, strace #else blbs $3, strace /* check for SYSCALL_TRACE in disguise */ #endif beq $4, 1f ldq $27, 0($5) 1: jsr $26, ($27), sys_ni_syscall ldgp $gp, 0($26) blt $0, $syscall_error /* the call failed */ stq $0, 0($sp) stq $31, 72($sp) /* a3=0 => no error */ .align 4 .globl ret_from_sys_call ret_from_sys_call: cmovne $26, 0, $18 /* $18 = 0 => non-restartable */ ldq $0, SP_OFF($sp) and $0, 8, $0 beq $0, ret_to_kernel ret_to_user: /* Make sure need_resched and sigpending don't change between sampling and the rti. */ lda $16, 7 call_pal PAL_swpipl ldl $17, TI_FLAGS($8) and $17, _TIF_WORK_MASK, $2 bne $2, work_pending restore_all: .cfi_remember_state RESTORE_ALL call_pal PAL_rti ret_to_kernel: .cfi_restore_state lda $16, 7 call_pal PAL_swpipl br restore_all .align 3 $syscall_error: /* * Some system calls (e.g., ptrace) can return arbitrary * values which might normally be mistaken as error numbers. * Those functions must zero $0 (v0) directly in the stack * frame to indicate that a negative return value wasn't an * error number.. */ ldq $18, 0($sp) /* old syscall nr (zero if success) */ beq $18, $ret_success ldq $19, 72($sp) /* .. 
and this a3 */ subq $31, $0, $0 /* with error in v0 */ addq $31, 1, $1 /* set a3 for errno return */ stq $0, 0($sp) mov $31, $26 /* tell "ret_from_sys_call" we can restart */ stq $1, 72($sp) /* a3 for return */ br ret_from_sys_call $ret_success: stq $0, 0($sp) stq $31, 72($sp) /* a3=0 => no error */ br ret_from_sys_call /* * Do all cleanup when returning from all interrupts and system calls. * * Arguments: * $8: current. * $17: TI_FLAGS. * $18: The old syscall number, or zero if this is not a return * from a syscall that errored and is possibly restartable. * $19: The old a3 value */ .align 4 .type work_pending, @function work_pending: and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2 bne $2, $work_notifysig $work_resched: /* * We can get here only if we returned from syscall without SIGPENDING * or got through work_notifysig already. Either case means no syscall * restarts for us, so let $18 and $19 burn. */ jsr $26, schedule mov 0, $18 br ret_to_user $work_notifysig: mov $sp, $16 DO_SWITCH_STACK jsr $26, do_work_pending UNDO_SWITCH_STACK br restore_all /* * PTRACE syscall handler */ .align 4 .type strace, @function strace: /* set up signal stack, call syscall_trace */ DO_SWITCH_STACK jsr $26, syscall_trace_enter /* returns the syscall number */ UNDO_SWITCH_STACK /* get the arguments back.. */ ldq $16, SP_OFF+24($sp) ldq $17, SP_OFF+32($sp) ldq $18, SP_OFF+40($sp) ldq $19, 72($sp) ldq $20, 80($sp) ldq $21, 88($sp) /* get the system call pointer.. */ lda $1, NR_SYSCALLS($31) lda $2, sys_call_table lda $27, sys_ni_syscall cmpult $0, $1, $1 s8addq $0, $2, $2 beq $1, 1f ldq $27, 0($2) 1: jsr $26, ($27), sys_gettimeofday ret_from_straced: ldgp $gp, 0($26) /* check return.. */ blt $0, $strace_error /* the call failed */ stq $31, 72($sp) /* a3=0 => no error */ $strace_success: stq $0, 0($sp) /* save return value */ DO_SWITCH_STACK jsr $26, syscall_trace_leave UNDO_SWITCH_STACK br $31, ret_from_sys_call .align 3 $strace_error: ldq $18, 0($sp) /* old syscall nr (zero if success) */ beq $18, $strace_success ldq $19, 72($sp) /* .. and this a3 */ subq $31, $0, $0 /* with error in v0 */ addq $31, 1, $1 /* set a3 for errno return */ stq $0, 0($sp) stq $1, 72($sp) /* a3 for return */ DO_SWITCH_STACK mov $18, $9 /* save old syscall number */ mov $19, $10 /* save old a3 */ jsr $26, syscall_trace_leave mov $9, $18 mov $10, $19 UNDO_SWITCH_STACK mov $31, $26 /* tell "ret_from_sys_call" we can restart */ br ret_from_sys_call CFI_END_OSF_FRAME entSys /* * Save and restore the switch stack -- aka the balance of the user context. 
*/ .align 4 .type do_switch_stack, @function .cfi_startproc simple .cfi_return_column 64 .cfi_def_cfa $sp, 0 .cfi_register 64, $1 do_switch_stack: lda $sp, -SWITCH_STACK_SIZE($sp) .cfi_adjust_cfa_offset SWITCH_STACK_SIZE stq $9, 0($sp) stq $10, 8($sp) stq $11, 16($sp) stq $12, 24($sp) stq $13, 32($sp) stq $14, 40($sp) stq $15, 48($sp) stq $26, 56($sp) stt $f0, 64($sp) stt $f1, 72($sp) stt $f2, 80($sp) stt $f3, 88($sp) stt $f4, 96($sp) stt $f5, 104($sp) stt $f6, 112($sp) stt $f7, 120($sp) stt $f8, 128($sp) stt $f9, 136($sp) stt $f10, 144($sp) stt $f11, 152($sp) stt $f12, 160($sp) stt $f13, 168($sp) stt $f14, 176($sp) stt $f15, 184($sp) stt $f16, 192($sp) stt $f17, 200($sp) stt $f18, 208($sp) stt $f19, 216($sp) stt $f20, 224($sp) stt $f21, 232($sp) stt $f22, 240($sp) stt $f23, 248($sp) stt $f24, 256($sp) stt $f25, 264($sp) stt $f26, 272($sp) stt $f27, 280($sp) mf_fpcr $f0 # get fpcr stt $f28, 288($sp) stt $f29, 296($sp) stt $f30, 304($sp) stt $f0, 312($sp) # save fpcr in slot of $f31 ldt $f0, 64($sp) # dont let "do_switch_stack" change fp state. ret $31, ($1), 1 .cfi_endproc .size do_switch_stack, .-do_switch_stack .align 4 .type undo_switch_stack, @function .cfi_startproc simple .cfi_def_cfa $sp, 0 .cfi_register 64, $1 undo_switch_stack: ldq $9, 0($sp) ldq $10, 8($sp) ldq $11, 16($sp) ldq $12, 24($sp) ldq $13, 32($sp) ldq $14, 40($sp) ldq $15, 48($sp) ldq $26, 56($sp) ldt $f30, 312($sp) # get saved fpcr ldt $f0, 64($sp) ldt $f1, 72($sp) ldt $f2, 80($sp) ldt $f3, 88($sp) mt_fpcr $f30 # install saved fpcr ldt $f4, 96($sp) ldt $f5, 104($sp) ldt $f6, 112($sp) ldt $f7, 120($sp) ldt $f8, 128($sp) ldt $f9, 136($sp) ldt $f10, 144($sp) ldt $f11, 152($sp) ldt $f12, 160($sp) ldt $f13, 168($sp) ldt $f14, 176($sp) ldt $f15, 184($sp) ldt $f16, 192($sp) ldt $f17, 200($sp) ldt $f18, 208($sp) ldt $f19, 216($sp) ldt $f20, 224($sp) ldt $f21, 232($sp) ldt $f22, 240($sp) ldt $f23, 248($sp) ldt $f24, 256($sp) ldt $f25, 264($sp) ldt $f26, 272($sp) ldt $f27, 280($sp) ldt $f28, 288($sp) ldt $f29, 296($sp) ldt $f30, 304($sp) lda $sp, SWITCH_STACK_SIZE($sp) ret $31, ($1), 1 .cfi_endproc .size undo_switch_stack, .-undo_switch_stack /* * The meat of the context switch code. */ .align 4 .globl alpha_switch_to .type alpha_switch_to, @function .cfi_startproc alpha_switch_to: DO_SWITCH_STACK call_pal PAL_swpctx lda $8, 0x3fff UNDO_SWITCH_STACK bic $sp, $8, $8 mov $17, $0 ret .cfi_endproc .size alpha_switch_to, .-alpha_switch_to /* * New processes begin life here. */ .globl ret_from_fork .align 4 .ent ret_from_fork ret_from_fork: lda $26, ret_from_sys_call mov $17, $16 jmp $31, schedule_tail .end ret_from_fork /* * ... and new kernel threads - here */ .align 4 .globl ret_from_kernel_thread .ent ret_from_kernel_thread ret_from_kernel_thread: mov $17, $16 jsr $26, schedule_tail mov $9, $27 mov $10, $16 jsr $26, ($9) br $31, ret_to_user .end ret_from_kernel_thread /* * Special system calls. Most of these are special in that they either * have to play switch_stack games. 
*/ .macro fork_like name .align 4 .globl alpha_\name .ent alpha_\name alpha_\name: .prologue 0 bsr $1, do_switch_stack jsr $26, sys_\name ldq $26, 56($sp) lda $sp, SWITCH_STACK_SIZE($sp) ret .end alpha_\name .endm fork_like fork fork_like vfork fork_like clone .macro sigreturn_like name .align 4 .globl sys_\name .ent sys_\name sys_\name: .prologue 0 lda $9, ret_from_straced cmpult $26, $9, $9 lda $sp, -SWITCH_STACK_SIZE($sp) jsr $26, do_\name bne $9, 1f jsr $26, syscall_trace_leave 1: br $1, undo_switch_stack br ret_from_sys_call .end sys_\name .endm sigreturn_like sigreturn sigreturn_like rt_sigreturn .align 4 .globl alpha_syscall_zero .ent alpha_syscall_zero alpha_syscall_zero: .prologue 0 /* Special because it needs to do something opposite to force_successful_syscall_return(). We use the saved syscall number for that, zero meaning "not an error". That works nicely, but for real syscall 0 we need to make sure that this logics doesn't get confused. Store a non-zero there - -ENOSYS we need in register for our return value will do just fine. */ lda $0, -ENOSYS unop stq $0, 0($sp) ret .end alpha_syscall_zero
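$syscall_error and $ret_success encode the OSF/1 syscall return convention: on success the saved a3 slot is zeroed and v0 carries the result; on failure a3 is set to 1 and v0 carries the positive errno. A userspace-side sketch (not kernel code) of how a libc wrapper is expected to fold that register pair back into the usual -1/errno convention:

#include <errno.h>

static long syscall_return(long v0, long a3)
{
        if (a3) {                /* kernel set the saved-a3 slot to 1 */
                errno = (int)v0; /* v0 holds the positive errno value */
                return -1;
        }
        return v0;               /* success: plain return value */
}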
aixcc-public/challenge-001-exemplar-source
1,431
arch/alpha/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #define EMITS_PT_NOTE #define RO_EXCEPTION_TABLE_ALIGN 16 #include <asm-generic/vmlinux.lds.h> #include <asm/thread_info.h> #include <asm/cache.h> #include <asm/page.h> #include <asm/setup.h> OUTPUT_FORMAT("elf64-alpha") OUTPUT_ARCH(alpha) ENTRY(__start) PHDRS { text PT_LOAD; note PT_NOTE; } jiffies = jiffies_64; SECTIONS { #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS . = 0xfffffc0000310000; #else . = 0xfffffc0001010000; #endif _text = .; /* Text and read-only data */ .text : { HEAD_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT *(.fixup) *(.gnu.warning) } :text swapper_pg_dir = SWAPPER_PGD; _etext = .; /* End of text section */ RO_DATA(4096) /* Will be freed after init */ __init_begin = ALIGN(PAGE_SIZE); INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) PERCPU_SECTION(L1_CACHE_BYTES) /* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page needed for the THREAD_SIZE aligned init_task gets freed after init */ . = ALIGN(THREAD_SIZE); __init_end = .; /* Freed after init ends here */ _sdata = .; /* Start of rw data section */ _data = .; RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) .got : { *(.got) } .sdata : { *(.sdata) } _edata = .; /* End of data section */ BSS_SECTION(0, 0, 0) _end = .; .mdebug 0 : { *(.mdebug) } .note 0 : { *(.note) } STABS_DEBUG DWARF_DEBUG ELF_DETAILS DISCARDS }
aixcc-public/challenge-001-exemplar-source
2,491
arch/alpha/lib/clear_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/clear_user.S * Contributed by Richard Henderson <rth@tamu.edu> * * Zero user space, handling exceptions as we go. * * We have to make sure that $0 is always up-to-date and contains the * right "bytes left to zero" value (and that it is updated only _after_ * a successful copy). There is also some rather minor exception setup * stuff. */ #include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EX(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exception-99b($31); \ .previous .set noat .set noreorder .align 4 .globl __clear_user .ent __clear_user .frame $30, 0, $26 .prologue 0 $loop: and $1, 3, $4 # e0 : beq $4, 1f # .. e1 : 0: EX( stq_u $31, 0($16) ) # e0 : zero one word subq $0, 8, $0 # .. e1 : subq $4, 1, $4 # e0 : addq $16, 8, $16 # .. e1 : bne $4, 0b # e1 : unop # : 1: bic $1, 3, $1 # e0 : beq $1, $tail # .. e1 : 2: EX( stq_u $31, 0($16) ) # e0 : zero four words subq $0, 8, $0 # .. e1 : EX( stq_u $31, 8($16) ) # e0 : subq $0, 8, $0 # .. e1 : EX( stq_u $31, 16($16) ) # e0 : subq $0, 8, $0 # .. e1 : EX( stq_u $31, 24($16) ) # e0 : subq $0, 8, $0 # .. e1 : subq $1, 4, $1 # e0 : addq $16, 32, $16 # .. e1 : bne $1, 2b # e1 : $tail: bne $2, 1f # e1 : is there a tail to do? ret $31, ($26), 1 # .. e1 : 1: EX( ldq_u $5, 0($16) ) # e0 : clr $0 # .. e1 : nop # e1 : mskqh $5, $0, $5 # e0 : EX( stq_u $5, 0($16) ) # e0 : ret $31, ($26), 1 # .. e1 : __clear_user: and $17, $17, $0 and $16, 7, $4 # e0 : find dest misalignment beq $0, $zerolength # .. e1 : addq $0, $4, $1 # e0 : bias counter and $1, 7, $2 # e1 : number of bytes in tail srl $1, 3, $1 # e0 : beq $4, $loop # .. e1 : EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in beq $1, $oneword # .. e1 : sub-word store? mskql $5, $16, $5 # e0 : take care of misaligned head addq $16, 8, $16 # .. e1 : EX( stq_u $5, -8($16) ) # e0 : addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment subq $1, 1, $1 # e0 : subq $0, 8, $0 # .. e1 : br $loop # e1 : unop # : $oneword: mskql $5, $16, $4 # e0 : mskqh $5, $2, $5 # e0 : or $5, $4, $5 # e1 : EX( stq_u $5, 0($16) ) # e0 : clr $0 # .. e1 : $zerolength: $exception: ret $31, ($26), 1 # .. e1 : .end __clear_user EXPORT_SYMBOL(__clear_user)
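The routine above keeps the "bytes left to zero" count in $0 authoritative at every point: it clears the misaligned head with a masked store, zeroes whole quadwords, then masks the tail, and decrements the count only after each store has succeeded so that a fault reports an accurate residue. A minimal C sketch of that head/body/tail structure (illustrative only, with a hypothetical helper name; the exception-table wiring that makes the real routine fault-tolerant is omitted):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch: zero 'left' bytes at 'dst' and return the number of bytes NOT
 * zeroed.  The real __clear_user decrements the count only after each
 * successful store and lets a faulting store exit with the residue in $0;
 * here every store "succeeds", so the return value is always 0. */
static unsigned long clear_user_sketch(unsigned char *dst, unsigned long left)
{
        const uint64_t zero = 0;

        while (left && ((uintptr_t)dst & 7)) {  /* misaligned head */
                *dst++ = 0;
                left--;
        }
        while (left >= 8) {                     /* aligned quadwords */
                memcpy(dst, &zero, 8);
                dst += 8;
                left -= 8;
        }
        while (left) {                          /* 0..7 trailing bytes */
                *dst++ = 0;
                left--;
        }
        return left;                            /* bytes left to zero */
}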
aixcc-public/challenge-001-exemplar-source
4,406
arch/alpha/lib/divide.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/divide.S * * (C) 1995 Linus Torvalds * * Alpha division.. */ /* * The alpha chip doesn't provide hardware division, so we have to do it * by hand. The compiler expects the functions * * __divqu: 64-bit unsigned long divide * __remqu: 64-bit unsigned long remainder * __divqs/__remqs: signed 64-bit * __divlu/__remlu: unsigned 32-bit * __divls/__remls: signed 32-bit * * These are not normal C functions: instead of the normal * calling sequence, these expect their arguments in registers * $24 and $25, and return the result in $27. Register $28 may * be clobbered (assembly temporary), anything else must be saved. * * In short: painful. * * This is a rather simple bit-at-a-time algorithm: it's very good * at dividing random 64-bit numbers, but the more usual case where * the divisor is small is handled better by the DEC algorithm * using lookup tables. This uses much less memory, though, and is * nicer on the cache.. Besides, I don't know the copyright status * of the DEC code. */ /* * My temporaries: * $0 - current bit * $1 - shifted divisor * $2 - modulus/quotient * * $23 - return address * $24 - dividend * $25 - divisor * * $27 - quotient/modulus * $28 - compare status */ #include <asm/export.h> #define halt .long 0 /* * Select function type and registers */ #define mask $0 #define divisor $1 #define compare $28 #define tmp1 $3 #define tmp2 $4 #ifdef DIV #define DIV_ONLY(x,y...) x,##y #define MOD_ONLY(x,y...) #define func(x) __div##x #define modulus $2 #define quotient $27 #define GETSIGN(x) xor $24,$25,x #define STACK 48 #else #define DIV_ONLY(x,y...) #define MOD_ONLY(x,y...) x,##y #define func(x) __rem##x #define modulus $27 #define quotient $2 #define GETSIGN(x) bis $24,$24,x #define STACK 32 #endif /* * For 32-bit operations, we need to extend to 64-bit */ #ifdef INTSIZE #define ufunction func(lu) #define sfunction func(l) #define LONGIFY(x) zapnot x,15,x #define SLONGIFY(x) addl x,0,x #else #define ufunction func(qu) #define sfunction func(q) #define LONGIFY(x) #define SLONGIFY(x) #endif .set noat .align 3 .globl ufunction .ent ufunction ufunction: subq $30,STACK,$30 .frame $30,STACK,$23 .prologue 0 7: stq $1, 0($30) bis $25,$25,divisor stq $2, 8($30) bis $24,$24,modulus stq $0,16($30) bis $31,$31,quotient LONGIFY(divisor) stq tmp1,24($30) LONGIFY(modulus) bis $31,1,mask DIV_ONLY(stq tmp2,32($30)) beq divisor, 9f /* div by zero */ #ifdef INTSIZE /* * shift divisor left, using 3-bit shifts for * 32-bit divides as we can't overflow. Three-bit * shifts will result in looping three times less * here, but can result in two loops more later. * Thus using a large shift isn't worth it (and * s8add pairs better than a sll..) */ 1: cmpult divisor,modulus,compare s8addq divisor,$31,divisor s8addq mask,$31,mask bne compare,1b #else 1: cmpult divisor,modulus,compare blt divisor, 2f addq divisor,divisor,divisor addq mask,mask,mask bne compare,1b unop #endif /* ok, start to go right again.. */ 2: DIV_ONLY(addq quotient,mask,tmp2) srl mask,1,mask cmpule divisor,modulus,compare subq modulus,divisor,tmp1 DIV_ONLY(cmovne compare,tmp2,quotient) srl divisor,1,divisor cmovne compare,tmp1,modulus bne mask,2b 9: ldq $1, 0($30) ldq $2, 8($30) ldq $0,16($30) ldq tmp1,24($30) DIV_ONLY(ldq tmp2,32($30)) addq $30,STACK,$30 ret $31,($23),1 .end ufunction EXPORT_SYMBOL(ufunction) /* * Uhh.. Ugly signed division. I'd rather not have it at all, but * it's needed in some circumstances. There are different ways to * handle this, really. 
This does: * -a / b = a / -b = -(a / b) * -a % b = -(a % b) * a % -b = a % b * which is probably not the best solution, but at least should * have the property that (x/y)*y + (x%y) = x. */ .align 3 .globl sfunction .ent sfunction sfunction: subq $30,STACK,$30 .frame $30,STACK,$23 .prologue 0 bis $24,$25,$28 SLONGIFY($28) bge $28,7b stq $24,0($30) subq $31,$24,$28 stq $25,8($30) cmovlt $24,$28,$24 /* abs($24) */ stq $23,16($30) subq $31,$25,$28 stq tmp1,24($30) cmovlt $25,$28,$25 /* abs($25) */ unop bsr $23,ufunction ldq $24,0($30) ldq $25,8($30) GETSIGN($28) subq $31,$27,tmp1 SLONGIFY($28) ldq $23,16($30) cmovlt $28,tmp1,$27 ldq tmp1,24($30) addq $30,STACK,$30 ret $31,($23),1 .end sfunction EXPORT_SYMBOL(sfunction)
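The comments above describe a plain shift-and-subtract algorithm: shift the divisor (and a mask bit) left until it no longer fits below the dividend, then walk both back down, subtracting and setting a quotient bit wherever the shifted divisor still fits. A rough C rendering of the unsigned routine, together with the sign-handling identities quoted above, might look like this (an illustrative sketch with hypothetical names, not the kernel code):

#include <stdint.h>

/* Shift-and-subtract division sketch.  Returns the quotient; *rem gets
 * the remainder.  On divide-by-zero it mirrors the assembly's fallthrough:
 * quotient 0, remainder equal to the dividend. */
static uint64_t udiv_sketch(uint64_t dividend, uint64_t divisor, uint64_t *rem)
{
        uint64_t quotient = 0, mask = 1;

        if (divisor == 0) {
                *rem = dividend;
                return 0;
        }
        /* Shift divisor/mask left until divisor >= dividend or the top bit
         * would be lost (the "blt divisor, 2f" guard above). */
        while (divisor < dividend && !(divisor >> 63)) {
                divisor <<= 1;
                mask <<= 1;
        }
        /* Walk back down, subtracting wherever the shifted divisor fits. */
        while (mask) {
                if (divisor <= dividend) {
                        dividend -= divisor;
                        quotient |= mask;
                }
                divisor >>= 1;
                mask >>= 1;
        }
        *rem = dividend;
        return quotient;
}

/* Signed variant following the identities in the comment above:
 * -a / b == a / -b == -(a / b) and the remainder keeps the dividend's sign,
 * so (x/y)*y + (x%y) == x.  (INT64_MIN edge cases are ignored here.) */
static int64_t sdiv_sketch(int64_t a, int64_t b, int64_t *rem)
{
        uint64_t ur;
        uint64_t uq = udiv_sketch(a < 0 ? -(uint64_t)a : (uint64_t)a,
                                  b < 0 ? -(uint64_t)b : (uint64_t)b, &ur);

        *rem = (a < 0) ? -(int64_t)ur : (int64_t)ur;
        return ((a < 0) != (b < 0)) ? -(int64_t)uq : (int64_t)uq;
}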
aixcc-public/challenge-001-exemplar-source
2,880
arch/alpha/lib/ev67-strchr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev67-strchr.S * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Return the address of a given character within a null-terminated * string, or null if it is not found. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/export.h> #include <asm/regdef.h> .set noreorder .set noat .align 4 .globl strchr .ent strchr strchr: .frame sp, 0, ra .prologue 0 ldq_u t0, 0(a0) # L : load first quadword Latency=3 and a1, 0xff, t3 # E : 00000000000000ch insbl a1, 1, t5 # U : 000000000000ch00 insbl a1, 7, a2 # U : ch00000000000000 insbl t3, 6, a3 # U : 00ch000000000000 or t5, t3, a1 # E : 000000000000chch andnot a0, 7, v0 # E : align our loop pointer lda t4, -1 # E : build garbage mask mskqh t4, a0, t4 # U : only want relevant part of first quad or a2, a3, a2 # E : chch000000000000 inswl a1, 2, t5 # E : 00000000chch0000 inswl a1, 4, a3 # E : 0000chch00000000 or a1, a2, a1 # E : chch00000000chch or a3, t5, t5 # E : 0000chchchch0000 cmpbge zero, t0, t2 # E : bits set iff byte == zero cmpbge zero, t4, t4 # E : bits set iff byte is garbage /* This quad is _very_ serialized. Lots of stalling happens */ or t5, a1, a1 # E : chchchchchchchch xor t0, a1, t1 # E : make bytes == c zero cmpbge zero, t1, t3 # E : bits set iff byte == c or t2, t3, t0 # E : bits set iff char match or zero match andnot t0, t4, t0 # E : clear garbage bits cttz t0, a2 # U0 : speculative (in case we get a match) nop # E : bne t0, $found # U : /* * Yuk. This loop is going to stall like crazy waiting for the * data to be loaded. Not much can be done about it unless it's * unrolled multiple times - is that safe to do in kernel space? * Or would exception handling recovery code do the trick here? */ $loop: ldq t0, 8(v0) # L : Latency=3 addq v0, 8, v0 # E : xor t0, a1, t1 # E : cmpbge zero, t0, t2 # E : bits set iff byte == 0 cmpbge zero, t1, t3 # E : bits set iff byte == c or t2, t3, t0 # E : cttz t3, a2 # U0 : speculative (in case we get a match) beq t0, $loop # U : $found: negq t0, t1 # E : clear all but least set bit and t0, t1, t0 # E : and t0, t3, t1 # E : bit set iff byte was the char addq v0, a2, v0 # E : Add in the bit number from above cmoveq t1, $31, v0 # E : Two mapping slots, latency = 2 nop nop ret # L0 : .end strchr EXPORT_SYMBOL(strchr)
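The prologue above replicates the search character into every byte lane of a quadword and then lets cmpbge against zero flag, per 8-byte chunk, both NUL bytes and bytes equal to the character. The same word-at-a-time idea can be sketched in portable C with the classic "has a zero byte" bit trick (illustrative only: it assumes an aligned, readable buffer and ignores the leading-garbage masking the assembly performs):

#include <stdint.h>
#include <stddef.h>

/* Word-at-a-time strchr sketch (hypothetical helper, not the kernel code). */
static const char *strchr_sketch(const char *s, int c)
{
        const uint64_t ones  = 0x0101010101010101ULL;
        const uint64_t highs = 0x8080808080808080ULL;
        uint64_t pattern = (uint8_t)c * ones;           /* chchchchchchchch */
        const uint64_t *p = (const uint64_t *)s;        /* assumed aligned  */

        for (;;) {
                uint64_t w = *p++;
                uint64_t x = w ^ pattern;               /* match bytes -> 0 */
                /* "byte is zero" flags for w (NUL) and x (match). */
                uint64_t zero  = (w - ones) & ~w & highs;
                uint64_t match = (x - ones) & ~x & highs;

                if (zero | match) {
                        /* Something is in this quadword; resolve it bytewise
                         * so match-before-NUL ordering is exact. */
                        const char *q = (const char *)(p - 1);
                        for (int i = 0; i < 8; i++, q++) {
                                if (*q == (char)c)
                                        return q;
                                if (*q == '\0')
                                        return NULL;
                        }
                }
        }
}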
aixcc-public/challenge-001-exemplar-source
8,994
arch/alpha/lib/stxcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/stxcpy.S * Contributed by Richard Henderson (rth@tamu.edu) * * Copy a null-terminated string from SRC to DST. * * This is an internal routine used by strcpy, stpcpy, and strcat. * As such, it uses special linkage conventions to make implementation * of these public functions more efficient. * * On input: * t9 = return address * a0 = DST * a1 = SRC * * On output: * t12 = bitmask (with one bit set) indicating the last byte written * a0 = unaligned address of the last *word* written * * Furthermore, v0, a3-a5, t11, and t12 are untouched. */ #include <asm/regdef.h> .set noat .set noreorder .text /* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that doesn't like putting the entry point for a procedure somewhere in the middle of the procedure descriptor. Work around this by putting the aligned copy in its own procedure descriptor */ .ent stxcpy_aligned .align 3 stxcpy_aligned: .frame sp, 0, t9 .prologue 0 /* On entry to this basic block: t0 == the first destination word for masking back in t1 == the first source word. */ /* Create the 1st output word and detect 0's in the 1st input word. */ lda t2, -1 # e1 : build a mask against false zero mskqh t2, a1, t2 # e0 : detection in the src word mskqh t1, a1, t3 # e0 : ornot t1, t2, t2 # .. e1 : mskql t0, a1, t0 # e0 : assemble the first output word cmpbge zero, t2, t8 # .. e1 : bits set iff null found or t0, t3, t1 # e0 : bne t8, $a_eos # .. e1 : /* On entry to this basic block: t0 == the first destination word for masking back in t1 == a source word not containing a null. */ $a_loop: stq_u t1, 0(a0) # e0 : addq a0, 8, a0 # .. e1 : ldq_u t1, 0(a1) # e0 : addq a1, 8, a1 # .. e1 : cmpbge zero, t1, t8 # e0 (stall) beq t8, $a_loop # .. e1 (zdb) /* Take care of the final (partial) word store. On entry to this basic block we have: t1 == the source word containing the null t8 == the cmpbge mask that found it. */ $a_eos: negq t8, t6 # e0 : find low bit set and t8, t6, t12 # e1 (stall) /* For the sake of the cache, don't read a destination word if we're not going to need it. */ and t12, 0x80, t6 # e0 : bne t6, 1f # .. e1 (zdb) /* We're doing a partial word store and so need to combine our source and original destination words. */ ldq_u t0, 0(a0) # e0 : subq t12, 1, t6 # .. e1 : zapnot t1, t6, t1 # e0 : clear src bytes >= null or t12, t6, t8 # .. e1 : zap t0, t8, t0 # e0 : clear dst bytes <= null or t0, t1, t1 # e1 : 1: stq_u t1, 0(a0) # e0 : ret (t9) # .. e1 : .end stxcpy_aligned .align 3 .ent __stxcpy .globl __stxcpy __stxcpy: .frame sp, 0, t9 .prologue 0 /* Are source and destination co-aligned? */ xor a0, a1, t0 # e0 : unop # : and t0, 7, t0 # e0 : bne t0, $unaligned # .. e1 : /* We are co-aligned; take care of a partial first word. */ ldq_u t1, 0(a1) # e0 : load first src word and a0, 7, t0 # .. e1 : take care not to load a word ... addq a1, 8, a1 # e0 : beq t0, stxcpy_aligned # .. e1 : ... if we wont need it ldq_u t0, 0(a0) # e0 : br stxcpy_aligned # .. e1 : /* The source and destination are not co-aligned. Align the destination and cope. We have to be very careful about not reading too much and causing a SEGV. */ .align 3 $u_head: /* We know just enough now to be able to assemble the first full source word. We can still find a zero at the end of it that prevents us from outputting the whole thing. 
On entry to this basic block: t0 == the first dest word, for masking back in, if needed else 0 t1 == the low bits of the first source word t6 == bytemask that is -1 in dest word bytes */ ldq_u t2, 8(a1) # e0 : addq a1, 8, a1 # .. e1 : extql t1, a1, t1 # e0 : extqh t2, a1, t4 # e0 : mskql t0, a0, t0 # e0 : or t1, t4, t1 # .. e1 : mskqh t1, a0, t1 # e0 : or t0, t1, t1 # e1 : or t1, t6, t6 # e0 : cmpbge zero, t6, t8 # .. e1 : lda t6, -1 # e0 : for masking just below bne t8, $u_final # .. e1 : mskql t6, a1, t6 # e0 : mask out the bits we have or t6, t2, t2 # e1 : already extracted before cmpbge zero, t2, t8 # e0 : testing eos bne t8, $u_late_head_exit # .. e1 (zdb) /* Finally, we've got all the stupid leading edge cases taken care of and we can set up to enter the main loop. */ stq_u t1, 0(a0) # e0 : store first output word addq a0, 8, a0 # .. e1 : extql t2, a1, t0 # e0 : position ho-bits of lo word ldq_u t2, 8(a1) # .. e1 : read next high-order source word addq a1, 8, a1 # e0 : cmpbge zero, t2, t8 # .. e1 : nop # e0 : bne t8, $u_eos # .. e1 : /* Unaligned copy main loop. In order to avoid reading too much, the loop is structured to detect zeros in aligned source words. This has, unfortunately, effectively pulled half of a loop iteration out into the head and half into the tail, but it does prevent nastiness from accumulating in the very thing we want to run as fast as possible. On entry to this basic block: t0 == the shifted high-order bits from the previous source word t2 == the unshifted current source word We further know that t2 does not contain a null terminator. */ .align 3 $u_loop: extqh t2, a1, t1 # e0 : extract high bits for current word addq a1, 8, a1 # .. e1 : extql t2, a1, t3 # e0 : extract low bits for next time addq a0, 8, a0 # .. e1 : or t0, t1, t1 # e0 : current dst word now complete ldq_u t2, 0(a1) # .. e1 : load high word for next time stq_u t1, -8(a0) # e0 : save the current word mov t3, t0 # .. e1 : cmpbge zero, t2, t8 # e0 : test new word for eos beq t8, $u_loop # .. e1 : /* We've found a zero somewhere in the source word we just read. If it resides in the lower half, we have one (probably partial) word to write out, and if it resides in the upper half, we have one full and one partial word left to write out. On entry to this basic block: t0 == the shifted high-order bits from the previous source word t2 == the unshifted current source word. */ $u_eos: extqh t2, a1, t1 # e0 : or t0, t1, t1 # e1 : first (partial) source word complete cmpbge zero, t1, t8 # e0 : is the null in this first bit? bne t8, $u_final # .. e1 (zdb) $u_late_head_exit: stq_u t1, 0(a0) # e0 : the null was in the high-order bits addq a0, 8, a0 # .. e1 : extql t2, a1, t1 # e0 : cmpbge zero, t1, t8 # .. e1 : /* Take care of a final (probably partial) result word. On entry to this basic block: t1 == assembled source word t8 == cmpbge mask that found the null. */ $u_final: negq t8, t6 # e0 : isolate low bit set and t6, t8, t12 # e1 : and t12, 0x80, t6 # e0 : avoid dest word load if we can bne t6, 1f # .. e1 (zdb) ldq_u t0, 0(a0) # e0 : subq t12, 1, t6 # .. e1 : or t6, t12, t8 # e0 : zapnot t1, t6, t1 # .. e1 : kill source bytes >= null zap t0, t8, t0 # e0 : kill dest bytes <= null or t0, t1, t1 # e1 : 1: stq_u t1, 0(a0) # e0 : ret (t9) # .. e1 : /* Unaligned copy entry point. */ .align 3 $unaligned: ldq_u t1, 0(a1) # e0 : load first source word and a0, 7, t4 # .. 
e1 : find dest misalignment and a1, 7, t5 # e0 : find src misalignment /* Conditionally load the first destination word and a bytemask with 0xff indicating that the destination byte is sacrosanct. */ mov zero, t0 # .. e1 : mov zero, t6 # e0 : beq t4, 1f # .. e1 : ldq_u t0, 0(a0) # e0 : lda t6, -1 # .. e1 : mskql t6, a0, t6 # e0 : 1: subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr /* If source misalignment is larger than dest misalignment, we need extra startup checks to avoid SEGV. */ cmplt t4, t5, t12 # e0 : beq t12, $u_head # .. e1 (zdb) lda t2, -1 # e1 : mask out leading garbage in source mskqh t2, t5, t2 # e0 : nop # e0 : ornot t1, t2, t3 # .. e1 : cmpbge zero, t3, t8 # e0 : is there a zero? beq t8, $u_head # .. e1 (zdb) /* At this point we've found a zero in the first partial word of the source. We need to isolate the valid source data and mask it into the original destination data. (Incidentally, we know that we'll need at least one byte of that original dest word.) */ ldq_u t0, 0(a0) # e0 : negq t8, t6 # .. e1 : build bitmask of bytes <= zero and t6, t8, t12 # e0 : and a1, 7, t5 # .. e1 : subq t12, 1, t6 # e0 : or t6, t12, t8 # e1 : srl t12, t5, t12 # e0 : adjust final null return value zapnot t2, t8, t2 # .. e1 : prepare source word; mirror changes and t1, t2, t1 # e1 : to source validity mask extql t2, a1, t2 # .. e0 : extql t1, a1, t1 # e0 : andnot t0, t2, t0 # .. e1 : zero place for source to reside or t0, t1, t1 # e1 : and put it there stq_u t1, 0(a0) # .. e0 : ret (t9) # e1 : .end __stxcpy
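The unaligned path above never loads more source data than it is entitled to: it reads only aligned quadwords and assembles each output word from the high bytes of the previous source word and the low bytes of the next one (the extql/extqh pair). On a little-endian machine that shift-and-merge step could be sketched in C like this (illustrative only; the NUL detection and the head/tail masking are omitted, and the helper name is hypothetical):

#include <stdint.h>
#include <stddef.h>

/* Copy 'nwords' aligned 64-bit words to 'dst' from a source that starts
 * 'shift' bytes (0 < shift < 8) into its first aligned quadword: each
 * output word merges the top of the previous source word with the bottom
 * of the next, mirroring the extql/extqh pairing.  Note this sketch reads
 * nwords + 1 quadwords from src_aligned. */
static void copy_shifted_words(uint64_t *dst, const uint64_t *src_aligned,
                               unsigned shift, size_t nwords)
{
        unsigned lo_bits = 8 * shift;           /* bits skipped in prev word */
        unsigned hi_bits = 64 - lo_bits;        /* bits taken from next word */
        uint64_t prev = *src_aligned++;

        while (nwords--) {
                uint64_t next = *src_aligned++;
                *dst++ = (prev >> lo_bits) | (next << hi_bits);
                prev = next;
        }
}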
aixcc-public/challenge-001-exemplar-source
16,394
arch/alpha/lib/ev6-memset.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-memset.S * * This is an efficient (and relatively small) implementation of the C library * "memset()" function for the 21264 implementation of Alpha. * * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * The algorithm for the leading and trailing quadwords remains the same, * however the loop has been unrolled to enable better memory throughput, * and the code has been replicated for each of the entry points: __memset * and __memset16 to permit better scheduling to eliminate the stalling * encountered during the mask replication. * A future enhancement might be to put in a byte store loop for really * small (say < 32 bytes) memset()s. Whether or not that change would be * a win in the kernel would depend upon the contextual usage. * WARNING: Maintaining this is going to be more work than the above version, * as fixes will need to be made in multiple places. The performance gain * is worth it. */ #include <asm/export.h> .set noat .set noreorder .text .globl memset .globl __memset .globl ___memset .globl __memset16 .globl __constant_c_memset .ent ___memset .align 5 ___memset: .frame $30,0,$26,0 .prologue 0 /* * Serious stalling happens. The only way to mitigate this is to * undertake a major re-write to interleave the constant materialization * with other parts of the fall-through code. This is important, even * though it makes maintenance tougher. * Do this later. */ and $17,255,$1 # E : 00000000000000ch insbl $17,1,$2 # U : 000000000000ch00 bis $16,$16,$0 # E : return value ble $18,end_b # U : zero length requested? addq $18,$16,$6 # E : max address to write to bis $1,$2,$17 # E : 000000000000chch insbl $1,2,$3 # U : 0000000000ch0000 insbl $1,3,$4 # U : 00000000ch000000 or $3,$4,$3 # E : 00000000chch0000 inswl $17,4,$5 # U : 0000chch00000000 xor $16,$6,$1 # E : will complete write be within one quadword? inswl $17,6,$2 # U : chch000000000000 or $17,$3,$17 # E : 00000000chchchch or $2,$5,$2 # E : chchchch00000000 bic $1,7,$1 # E : fit within a single quadword? and $16,7,$3 # E : Target addr misalignment or $17,$2,$17 # E : chchchchchchchch beq $1,within_quad_b # U : nop # E : beq $3,aligned_b # U : target is 0mod8 /* * Target address is misaligned, and won't fit within a quadword */ ldq_u $4,0($16) # L : Fetch first partial bis $16,$16,$5 # E : Save the address insql $17,$16,$2 # U : Insert new bytes subq $3,8,$3 # E : Invert (for addressing uses) addq $18,$3,$18 # E : $18 is new count ($3 is negative) mskql $4,$16,$4 # U : clear relevant parts of the quad subq $16,$3,$16 # E : $16 is new aligned destination bis $2,$4,$1 # E : Final bytes nop stq_u $1,0($5) # L : Store result nop nop .align 4 aligned_b: /* * We are now guaranteed to be quad aligned, with at least * one partial quad to write. 
*/ sra $18,3,$3 # U : Number of remaining quads to write and $18,7,$18 # E : Number of trailing bytes to write bis $16,$16,$5 # E : Save dest address beq $3,no_quad_b # U : tail stuff only /* * it's worth the effort to unroll this and use wh64 if possible * Lifted a bunch of code from clear_user.S * At this point, entry values are: * $16 Current destination address * $5 A copy of $16 * $6 The max quadword address to write to * $18 Number trailer bytes * $3 Number quads to write */ and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop) subq $3, 16, $4 # E : Only try to unroll if > 128 bytes subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64) blt $4, loop_b # U : /* * We know we've got at least 16 quads, minimum of one trip * through unrolled loop. Do a quad at a time to get us 0mod64 * aligned. */ nop # E : nop # E : nop # E : beq $1, $bigalign_b # U : $alignmod64_b: stq $17, 0($5) # L : subq $3, 1, $3 # E : For consistency later addq $1, 8, $1 # E : Increment towards zero for alignment addq $5, 8, $4 # E : Initial wh64 address (filler instruction) nop nop addq $5, 8, $5 # E : Inc address blt $1, $alignmod64_b # U : $bigalign_b: /* * $3 - number quads left to go * $5 - target address (aligned 0mod64) * $17 - mask of stuff to store * Scratch registers available: $7, $2, $4, $1 * we know that we'll be taking a minimum of one trip through * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * Assumes the wh64 needs to be for 2 trips through the loop in the future * The wh64 is issued on for the starting destination address for trip +2 * through the loop, and if there are less than two trips left, the target * address will be for the current trip. */ $do_wh64_b: wh64 ($4) # L1 : memory subsystem write hint subq $3, 24, $2 # E : For determining future wh64 addresses stq $17, 0($5) # L : nop # E : addq $5, 128, $4 # E : speculative target of next wh64 stq $17, 8($5) # L : stq $17, 16($5) # L : addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr) stq $17, 24($5) # L : stq $17, 32($5) # L : cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle nop stq $17, 40($5) # L : stq $17, 48($5) # L : subq $3, 16, $2 # E : Repeat the loop at least once more? nop stq $17, 56($5) # L : addq $5, 64, $5 # E : subq $3, 8, $3 # E : bge $2, $do_wh64_b # U : nop nop nop beq $3, no_quad_b # U : Might have finished already .align 4 /* * Simple loop for trailing quadwords, or for small amounts * of data (where we can't use an unrolled loop and wh64) */ loop_b: stq $17,0($5) # L : subq $3,1,$3 # E : Decrement number quads left addq $5,8,$5 # E : Inc address bne $3,loop_b # U : more? no_quad_b: /* * Write 0..7 trailing bytes. */ nop # E : beq $18,end_b # U : All done? ldq $7,0($5) # L : mskqh $7,$6,$2 # U : Mask final quad insqh $17,$6,$4 # U : New bits bis $2,$4,$1 # E : Put it all together stq $1,0($5) # L : And back to memory ret $31,($26),1 # L0 : within_quad_b: ldq_u $1,0($16) # L : insql $17,$16,$2 # U : New bits mskql $1,$16,$4 # U : Clear old bis $2,$4,$2 # E : New result mskql $2,$6,$4 # U : mskqh $1,$6,$2 # U : bis $2,$4,$1 # E : stq_u $1,0($16) # L : end_b: nop nop nop ret $31,($26),1 # L0 : .end ___memset EXPORT_SYMBOL(___memset) /* * This is the original body of code, prior to replication and * rescheduling. Leave it here, as there may be calls to this * entry point. 
*/ .align 4 .ent __constant_c_memset __constant_c_memset: .frame $30,0,$26,0 .prologue 0 addq $18,$16,$6 # E : max address to write to bis $16,$16,$0 # E : return value xor $16,$6,$1 # E : will complete write be within one quadword? ble $18,end # U : zero length requested? bic $1,7,$1 # E : fit within a single quadword beq $1,within_one_quad # U : and $16,7,$3 # E : Target addr misalignment beq $3,aligned # U : target is 0mod8 /* * Target address is misaligned, and won't fit within a quadword */ ldq_u $4,0($16) # L : Fetch first partial bis $16,$16,$5 # E : Save the address insql $17,$16,$2 # U : Insert new bytes subq $3,8,$3 # E : Invert (for addressing uses) addq $18,$3,$18 # E : $18 is new count ($3 is negative) mskql $4,$16,$4 # U : clear relevant parts of the quad subq $16,$3,$16 # E : $16 is new aligned destination bis $2,$4,$1 # E : Final bytes nop stq_u $1,0($5) # L : Store result nop nop .align 4 aligned: /* * We are now guaranteed to be quad aligned, with at least * one partial quad to write. */ sra $18,3,$3 # U : Number of remaining quads to write and $18,7,$18 # E : Number of trailing bytes to write bis $16,$16,$5 # E : Save dest address beq $3,no_quad # U : tail stuff only /* * it's worth the effort to unroll this and use wh64 if possible * Lifted a bunch of code from clear_user.S * At this point, entry values are: * $16 Current destination address * $5 A copy of $16 * $6 The max quadword address to write to * $18 Number trailer bytes * $3 Number quads to write */ and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop) subq $3, 16, $4 # E : Only try to unroll if > 128 bytes subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64) blt $4, loop # U : /* * We know we've got at least 16 quads, minimum of one trip * through unrolled loop. Do a quad at a time to get us 0mod64 * aligned. */ nop # E : nop # E : nop # E : beq $1, $bigalign # U : $alignmod64: stq $17, 0($5) # L : subq $3, 1, $3 # E : For consistency later addq $1, 8, $1 # E : Increment towards zero for alignment addq $5, 8, $4 # E : Initial wh64 address (filler instruction) nop nop addq $5, 8, $5 # E : Inc address blt $1, $alignmod64 # U : $bigalign: /* * $3 - number quads left to go * $5 - target address (aligned 0mod64) * $17 - mask of stuff to store * Scratch registers available: $7, $2, $4, $1 * we know that we'll be taking a minimum of one trip through * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * Assumes the wh64 needs to be for 2 trips through the loop in the future * The wh64 is issued on for the starting destination address for trip +2 * through the loop, and if there are less than two trips left, the target * address will be for the current trip. */ $do_wh64: wh64 ($4) # L1 : memory subsystem write hint subq $3, 24, $2 # E : For determining future wh64 addresses stq $17, 0($5) # L : nop # E : addq $5, 128, $4 # E : speculative target of next wh64 stq $17, 8($5) # L : stq $17, 16($5) # L : addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr) stq $17, 24($5) # L : stq $17, 32($5) # L : cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle nop stq $17, 40($5) # L : stq $17, 48($5) # L : subq $3, 16, $2 # E : Repeat the loop at least once more? 
nop stq $17, 56($5) # L : addq $5, 64, $5 # E : subq $3, 8, $3 # E : bge $2, $do_wh64 # U : nop nop nop beq $3, no_quad # U : Might have finished already .align 4 /* * Simple loop for trailing quadwords, or for small amounts * of data (where we can't use an unrolled loop and wh64) */ loop: stq $17,0($5) # L : subq $3,1,$3 # E : Decrement number quads left addq $5,8,$5 # E : Inc address bne $3,loop # U : more? no_quad: /* * Write 0..7 trailing bytes. */ nop # E : beq $18,end # U : All done? ldq $7,0($5) # L : mskqh $7,$6,$2 # U : Mask final quad insqh $17,$6,$4 # U : New bits bis $2,$4,$1 # E : Put it all together stq $1,0($5) # L : And back to memory ret $31,($26),1 # L0 : within_one_quad: ldq_u $1,0($16) # L : insql $17,$16,$2 # U : New bits mskql $1,$16,$4 # U : Clear old bis $2,$4,$2 # E : New result mskql $2,$6,$4 # U : mskqh $1,$6,$2 # U : bis $2,$4,$1 # E : stq_u $1,0($16) # L : end: nop nop nop ret $31,($26),1 # L0 : .end __constant_c_memset EXPORT_SYMBOL(__constant_c_memset) /* * This is a replicant of the __constant_c_memset code, rescheduled * to mask stalls. Note that entry point names also had to change */ .align 5 .ent __memset16 __memset16: .frame $30,0,$26,0 .prologue 0 inswl $17,0,$5 # U : 000000000000c1c2 inswl $17,2,$2 # U : 00000000c1c20000 bis $16,$16,$0 # E : return value addq $18,$16,$6 # E : max address to write to ble $18, end_w # U : zero length requested? inswl $17,4,$3 # U : 0000c1c200000000 inswl $17,6,$4 # U : c1c2000000000000 xor $16,$6,$1 # E : will complete write be within one quadword? or $2,$5,$2 # E : 00000000c1c2c1c2 or $3,$4,$17 # E : c1c2c1c200000000 bic $1,7,$1 # E : fit within a single quadword and $16,7,$3 # E : Target addr misalignment or $17,$2,$17 # E : c1c2c1c2c1c2c1c2 beq $1,within_quad_w # U : nop beq $3,aligned_w # U : target is 0mod8 /* * Target address is misaligned, and won't fit within a quadword */ ldq_u $4,0($16) # L : Fetch first partial bis $16,$16,$5 # E : Save the address insql $17,$16,$2 # U : Insert new bytes subq $3,8,$3 # E : Invert (for addressing uses) addq $18,$3,$18 # E : $18 is new count ($3 is negative) mskql $4,$16,$4 # U : clear relevant parts of the quad subq $16,$3,$16 # E : $16 is new aligned destination bis $2,$4,$1 # E : Final bytes nop stq_u $1,0($5) # L : Store result nop nop .align 4 aligned_w: /* * We are now guaranteed to be quad aligned, with at least * one partial quad to write. */ sra $18,3,$3 # U : Number of remaining quads to write and $18,7,$18 # E : Number of trailing bytes to write bis $16,$16,$5 # E : Save dest address beq $3,no_quad_w # U : tail stuff only /* * it's worth the effort to unroll this and use wh64 if possible * Lifted a bunch of code from clear_user.S * At this point, entry values are: * $16 Current destination address * $5 A copy of $16 * $6 The max quadword address to write to * $18 Number trailer bytes * $3 Number quads to write */ and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop) subq $3, 16, $4 # E : Only try to unroll if > 128 bytes subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64) blt $4, loop_w # U : /* * We know we've got at least 16 quads, minimum of one trip * through unrolled loop. Do a quad at a time to get us 0mod64 * aligned. 
*/ nop # E : nop # E : nop # E : beq $1, $bigalign_w # U : $alignmod64_w: stq $17, 0($5) # L : subq $3, 1, $3 # E : For consistency later addq $1, 8, $1 # E : Increment towards zero for alignment addq $5, 8, $4 # E : Initial wh64 address (filler instruction) nop nop addq $5, 8, $5 # E : Inc address blt $1, $alignmod64_w # U : $bigalign_w: /* * $3 - number quads left to go * $5 - target address (aligned 0mod64) * $17 - mask of stuff to store * Scratch registers available: $7, $2, $4, $1 * we know that we'll be taking a minimum of one trip through * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * Assumes the wh64 needs to be for 2 trips through the loop in the future * The wh64 is issued on for the starting destination address for trip +2 * through the loop, and if there are less than two trips left, the target * address will be for the current trip. */ $do_wh64_w: wh64 ($4) # L1 : memory subsystem write hint subq $3, 24, $2 # E : For determining future wh64 addresses stq $17, 0($5) # L : nop # E : addq $5, 128, $4 # E : speculative target of next wh64 stq $17, 8($5) # L : stq $17, 16($5) # L : addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr) stq $17, 24($5) # L : stq $17, 32($5) # L : cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle nop stq $17, 40($5) # L : stq $17, 48($5) # L : subq $3, 16, $2 # E : Repeat the loop at least once more? nop stq $17, 56($5) # L : addq $5, 64, $5 # E : subq $3, 8, $3 # E : bge $2, $do_wh64_w # U : nop nop nop beq $3, no_quad_w # U : Might have finished already .align 4 /* * Simple loop for trailing quadwords, or for small amounts * of data (where we can't use an unrolled loop and wh64) */ loop_w: stq $17,0($5) # L : subq $3,1,$3 # E : Decrement number quads left addq $5,8,$5 # E : Inc address bne $3,loop_w # U : more? no_quad_w: /* * Write 0..7 trailing bytes. */ nop # E : beq $18,end_w # U : All done? ldq $7,0($5) # L : mskqh $7,$6,$2 # U : Mask final quad insqh $17,$6,$4 # U : New bits bis $2,$4,$1 # E : Put it all together stq $1,0($5) # L : And back to memory ret $31,($26),1 # L0 : within_quad_w: ldq_u $1,0($16) # L : insql $17,$16,$2 # U : New bits mskql $1,$16,$4 # U : Clear old bis $2,$4,$2 # E : New result mskql $2,$6,$4 # U : mskqh $1,$6,$2 # U : bis $2,$4,$1 # E : stq_u $1,0($16) # L : end_w: nop nop nop ret $31,($26),1 # L0 : .end __memset16 EXPORT_SYMBOL(__memset16) memset = ___memset __memset = ___memset EXPORT_SYMBOL(memset) EXPORT_SYMBOL(__memset)
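All three entry points above share one shape: replicate the fill pattern across a quadword, fix up a misaligned head with a masked read-modify-write, stream aligned quadwords (unrolled with wh64 write hints once at least 16 quads remain), and finish with a masked tail. Stripped of the unrolling and the write hints, the structure reduces to roughly this C sketch (illustrative, not the kernel code; plain byte stores stand in for the masked partial-word stores):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Quadword-oriented memset sketch (hypothetical name). */
static void *memset_sketch(void *dst, int c, size_t len)
{
        unsigned char *p = dst;
        uint64_t fill = (uint8_t)c * 0x0101010101010101ULL;    /* chch...ch */

        while (len && ((uintptr_t)p & 7)) {     /* head: reach 0mod8 */
                *p++ = (unsigned char)c;
                len--;
        }
        while (len >= 8) {                      /* aligned quadword body */
                memcpy(p, &fill, 8);
                p += 8;
                len -= 8;
        }
        while (len--)                           /* 0..7 trailing bytes */
                *p++ = (unsigned char)c;
        return dst;
}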
aixcc-public/challenge-001-exemplar-source
2,999
arch/alpha/lib/callback_srm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/callback_srm.S */ #include <asm/console.h> #include <asm/export.h> .text #define HWRPB_CRB_OFFSET 0xc0 #if defined(CONFIG_ALPHA_SRM) || defined(CONFIG_ALPHA_GENERIC) .align 4 srm_dispatch: #if defined(CONFIG_ALPHA_GENERIC) ldl $4,alpha_using_srm beq $4,nosrm #endif ldq $0,hwrpb # gp is set up by CALLBACK macro. ldl $25,0($25) # Pick up the wrapper data. mov $20,$21 # Shift arguments right. mov $19,$20 ldq $1,HWRPB_CRB_OFFSET($0) mov $18,$19 mov $17,$18 mov $16,$17 addq $0,$1,$2 # CRB address ldq $27,0($2) # DISPATCH procedure descriptor (VMS call std) extwl $25,0,$16 # SRM callback function code ldq $3,8($27) # call address extwl $25,2,$25 # argument information (VMS calling std) jmp ($3) # Return directly to caller of wrapper. .align 4 .globl srm_fixup .ent srm_fixup srm_fixup: ldgp $29,0($27) #if defined(CONFIG_ALPHA_GENERIC) ldl $4,alpha_using_srm beq $4,nosrm #endif ldq $0,hwrpb ldq $1,HWRPB_CRB_OFFSET($0) addq $0,$1,$2 # CRB address ldq $27,16($2) # VA of FIXUP procedure descriptor ldq $3,8($27) # call address lda $25,2($31) # two integer arguments jmp ($3) # Return directly to caller of srm_fixup. .end srm_fixup #if defined(CONFIG_ALPHA_GENERIC) .align 3 nosrm: lda $0,-1($31) ret #endif #define CALLBACK(NAME, CODE, ARG_CNT) \ .align 4; .globl callback_##NAME; .ent callback_##NAME; callback_##NAME##: \ ldgp $29,0($27); br $25,srm_dispatch; .word CODE, ARG_CNT; .end callback_##NAME #else /* defined(CONFIG_ALPHA_SRM) || defined(CONFIG_ALPHA_GENERIC) */ #define CALLBACK(NAME, CODE, ARG_CNT) \ .align 3; .globl callback_##NAME; .ent callback_##NAME; callback_##NAME##: \ lda $0,-1($31); ret; .end callback_##NAME .align 3 .globl srm_fixup .ent srm_fixup srm_fixup: lda $0,-1($31) ret .end srm_fixup #endif /* defined(CONFIG_ALPHA_SRM) || defined(CONFIG_ALPHA_GENERIC) */ CALLBACK(puts, CCB_PUTS, 4) CALLBACK(open, CCB_OPEN, 3) CALLBACK(close, CCB_CLOSE, 2) CALLBACK(read, CCB_READ, 5) CALLBACK(open_console, CCB_OPEN_CONSOLE, 1) CALLBACK(close_console, CCB_CLOSE_CONSOLE, 1) CALLBACK(getenv, CCB_GET_ENV, 4) CALLBACK(setenv, CCB_SET_ENV, 4) CALLBACK(getc, CCB_GETC, 2) CALLBACK(reset_term, CCB_RESET_TERM, 2) CALLBACK(term_int, CCB_SET_TERM_INT, 3) CALLBACK(term_ctl, CCB_SET_TERM_CTL, 3) CALLBACK(process_keycode, CCB_PROCESS_KEYCODE, 3) CALLBACK(ioctl, CCB_IOCTL, 6) CALLBACK(write, CCB_WRITE, 5) CALLBACK(reset_env, CCB_RESET_ENV, 4) CALLBACK(save_env, CCB_SAVE_ENV, 1) CALLBACK(pswitch, CCB_PSWITCH, 3) CALLBACK(bios_emul, CCB_BIOS_EMUL, 5) EXPORT_SYMBOL(callback_getenv) EXPORT_SYMBOL(callback_setenv) EXPORT_SYMBOL(callback_save_env) .data __alpha_using_srm: # For use by bootpheader .long 7 # value is not 1 for link debugging .weak alpha_using_srm; alpha_using_srm = __alpha_using_srm __callback_init_done: # For use by bootpheader .long 7 # value is not 1 for link debugging .weak callback_init_done; callback_init_done = __callback_init_done
aixcc-public/challenge-001-exemplar-source
6,378
arch/alpha/lib/ev6-memcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-memcpy.S * 21264 version by Rick Gorton <rick.gorton@alpha-processor.com> * * Reasonably optimized memcpy() routine for the Alpha 21264 * * - memory accessed as aligned quadwords only * - uses bcmpge to compare 8 bytes in parallel * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * * Temp usage notes: * $1,$2, - scratch */ #include <asm/export.h> .set noreorder .set noat .align 4 .globl memcpy .ent memcpy memcpy: .frame $30,0,$26,0 .prologue 0 mov $16, $0 # E : copy dest to return ble $18, $nomoredata # U : done with the copy? xor $16, $17, $1 # E : are source and dest alignments the same? and $1, 7, $1 # E : are they the same mod 8? bne $1, $misaligned # U : Nope - gotta do this the slow way /* source and dest are same mod 8 address */ and $16, 7, $1 # E : Are both 0mod8? beq $1, $both_0mod8 # U : Yes nop # E : /* * source and dest are same misalignment. move a byte at a time * until a 0mod8 alignment for both is reached. * At least one byte more to move */ $head_align: ldbu $1, 0($17) # L : grab a byte subq $18, 1, $18 # E : count-- addq $17, 1, $17 # E : src++ stb $1, 0($16) # L : addq $16, 1, $16 # E : dest++ and $16, 7, $1 # E : Are we at 0mod8 yet? ble $18, $nomoredata # U : done with the copy? bne $1, $head_align # U : $both_0mod8: cmple $18, 127, $1 # E : Can we unroll the loop? bne $1, $no_unroll # U : and $16, 63, $1 # E : get mod64 alignment beq $1, $do_unroll # U : no single quads to fiddle $single_head_quad: ldq $1, 0($17) # L : get 8 bytes subq $18, 8, $18 # E : count -= 8 addq $17, 8, $17 # E : src += 8 nop # E : stq $1, 0($16) # L : store addq $16, 8, $16 # E : dest += 8 and $16, 63, $1 # E : get mod64 alignment bne $1, $single_head_quad # U : still not fully aligned $do_unroll: addq $16, 64, $7 # E : Initial (+1 trip) wh64 address cmple $18, 127, $1 # E : Can we go through the unrolled loop? bne $1, $tail_quads # U : Nope nop # E : $unroll_body: wh64 ($7) # L1 : memory subsystem hint: 64 bytes at # ($7) are about to be over-written ldq $6, 0($17) # L0 : bytes 0..7 nop # E : nop # E : ldq $4, 8($17) # L : bytes 8..15 ldq $5, 16($17) # L : bytes 16..23 addq $7, 64, $7 # E : Update next wh64 address nop # E : ldq $3, 24($17) # L : bytes 24..31 addq $16, 64, $1 # E : fallback value for wh64 nop # E : nop # E : addq $17, 32, $17 # E : src += 32 bytes stq $6, 0($16) # L : bytes 0..7 nop # E : nop # E : stq $4, 8($16) # L : bytes 8..15 stq $5, 16($16) # L : bytes 16..23 subq $18, 192, $2 # E : At least two more trips to go? nop # E : stq $3, 24($16) # L : bytes 24..31 addq $16, 32, $16 # E : dest += 32 bytes nop # E : nop # E : ldq $6, 0($17) # L : bytes 0..7 ldq $4, 8($17) # L : bytes 8..15 cmovlt $2, $1, $7 # E : Latency 2, extra map slot - Use # fallback wh64 address if < 2 more trips nop # E : ldq $5, 16($17) # L : bytes 16..23 ldq $3, 24($17) # L : bytes 24..31 addq $16, 32, $16 # E : dest += 32 subq $18, 64, $18 # E : count -= 64 addq $17, 32, $17 # E : src += 32 stq $6, -32($16) # L : bytes 0..7 stq $4, -24($16) # L : bytes 8..15 cmple $18, 63, $1 # E : At least one more trip? 
stq $5, -16($16) # L : bytes 16..23 stq $3, -8($16) # L : bytes 24..31 nop # E : beq $1, $unroll_body $tail_quads: $no_unroll: .align 4 subq $18, 8, $18 # E : At least a quad left? blt $18, $less_than_8 # U : Nope nop # E : nop # E : $move_a_quad: ldq $1, 0($17) # L : fetch 8 subq $18, 8, $18 # E : count -= 8 addq $17, 8, $17 # E : src += 8 nop # E : stq $1, 0($16) # L : store 8 addq $16, 8, $16 # E : dest += 8 bge $18, $move_a_quad # U : nop # E : $less_than_8: .align 4 addq $18, 8, $18 # E : add back for trailing bytes ble $18, $nomoredata # U : All-done nop # E : nop # E : /* Trailing bytes */ $tail_bytes: subq $18, 1, $18 # E : count-- ldbu $1, 0($17) # L : fetch a byte addq $17, 1, $17 # E : src++ nop # E : stb $1, 0($16) # L : store a byte addq $16, 1, $16 # E : dest++ bgt $18, $tail_bytes # U : more to be done? nop # E : /* branching to exit takes 3 extra cycles, so replicate exit here */ ret $31, ($26), 1 # L0 : nop # E : nop # E : nop # E : $misaligned: mov $0, $4 # E : dest temp and $0, 7, $1 # E : dest alignment mod8 beq $1, $dest_0mod8 # U : life doesnt totally suck nop $aligndest: ble $18, $nomoredata # U : ldbu $1, 0($17) # L : fetch a byte subq $18, 1, $18 # E : count-- addq $17, 1, $17 # E : src++ stb $1, 0($4) # L : store it addq $4, 1, $4 # E : dest++ and $4, 7, $1 # E : dest 0mod8 yet? bne $1, $aligndest # U : go until we are aligned. /* Source has unknown alignment, but dest is known to be 0mod8 */ $dest_0mod8: subq $18, 8, $18 # E : At least a quad left? blt $18, $misalign_tail # U : Nope ldq_u $3, 0($17) # L : seed (rotating load) of 8 bytes nop # E : $mis_quad: ldq_u $16, 8($17) # L : Fetch next 8 extql $3, $17, $3 # U : masking extqh $16, $17, $1 # U : masking bis $3, $1, $1 # E : merged bytes to store subq $18, 8, $18 # E : count -= 8 addq $17, 8, $17 # E : src += 8 stq $1, 0($4) # L : store 8 (aligned) mov $16, $3 # E : "rotate" source data addq $4, 8, $4 # E : dest += 8 bge $18, $mis_quad # U : More quads to move nop nop $misalign_tail: addq $18, 8, $18 # E : account for tail stuff ble $18, $nomoredata # U : nop nop $misalign_byte: ldbu $1, 0($17) # L : fetch 1 subq $18, 1, $18 # E : count-- addq $17, 1, $17 # E : src++ nop # E : stb $1, 0($4) # L : store addq $4, 1, $4 # E : dest++ bgt $18, $misalign_byte # U : more to go? nop $nomoredata: ret $31, ($26), 1 # L0 : nop # E : nop # E : nop # E : .end memcpy EXPORT_SYMBOL(memcpy) /* For backwards module compatibility. */ __memcpy = memcpy .globl __memcpy
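The copy above splits into a co-aligned case (byte-copy until both pointers reach 0mod8, then whole quadwords, then a byte tail) and a misaligned case that rotates aligned source quadwords through extql/extqh into an aligned destination. The co-aligned skeleton, minus the unrolling and wh64 hints, corresponds roughly to this C sketch (illustrative only, hypothetical name):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Co-aligned memcpy sketch: valid when dst and src are congruent mod 8. */
static void *memcpy_coaligned_sketch(void *dst, const void *src, size_t len)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        while (len && ((uintptr_t)d & 7)) {     /* head_align */
                *d++ = *s++;
                len--;
        }
        while (len >= 8) {                      /* quadword body */
                uint64_t w;
                memcpy(&w, s, 8);
                memcpy(d, &w, 8);
                s += 8;
                d += 8;
                len -= 8;
        }
        while (len--)                           /* trailing bytes */
                *d++ = *s++;
        return dst;
}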
aixcc-public/challenge-001-exemplar-source
9,666
arch/alpha/lib/ev6-stxcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-stxcpy.S * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Copy a null-terminated string from SRC to DST. * * This is an internal routine used by strcpy, stpcpy, and strcat. * As such, it uses special linkage conventions to make implementation * of these public functions more efficient. * * On input: * t9 = return address * a0 = DST * a1 = SRC * * On output: * t12 = bitmask (with one bit set) indicating the last byte written * a0 = unaligned address of the last *word* written * * Furthermore, v0, a3-a5, t11, and t12 are untouched. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/regdef.h> .set noat .set noreorder .text /* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that doesn't like putting the entry point for a procedure somewhere in the middle of the procedure descriptor. Work around this by putting the aligned copy in its own procedure descriptor */ .ent stxcpy_aligned .align 4 stxcpy_aligned: .frame sp, 0, t9 .prologue 0 /* On entry to this basic block: t0 == the first destination word for masking back in t1 == the first source word. */ /* Create the 1st output word and detect 0's in the 1st input word. */ lda t2, -1 # E : build a mask against false zero mskqh t2, a1, t2 # U : detection in the src word (stall) mskqh t1, a1, t3 # U : ornot t1, t2, t2 # E : (stall) mskql t0, a1, t0 # U : assemble the first output word cmpbge zero, t2, t8 # E : bits set iff null found or t0, t3, t1 # E : (stall) bne t8, $a_eos # U : (stall) /* On entry to this basic block: t0 == the first destination word for masking back in t1 == a source word not containing a null. */ /* Nops here to separate store quads from load quads */ $a_loop: stq_u t1, 0(a0) # L : addq a0, 8, a0 # E : nop nop ldq_u t1, 0(a1) # L : Latency=3 addq a1, 8, a1 # E : cmpbge zero, t1, t8 # E : (3 cycle stall) beq t8, $a_loop # U : (stall for t8) /* Take care of the final (partial) word store. On entry to this basic block we have: t1 == the source word containing the null t8 == the cmpbge mask that found it. */ $a_eos: negq t8, t6 # E : find low bit set and t8, t6, t12 # E : (stall) /* For the sake of the cache, don't read a destination word if we're not going to need it. */ and t12, 0x80, t6 # E : (stall) bne t6, 1f # U : (stall) /* We're doing a partial word store and so need to combine our source and original destination words. */ ldq_u t0, 0(a0) # L : Latency=3 subq t12, 1, t6 # E : zapnot t1, t6, t1 # U : clear src bytes >= null (stall) or t12, t6, t8 # E : (stall) zap t0, t8, t0 # E : clear dst bytes <= null or t0, t1, t1 # E : (stall) nop nop 1: stq_u t1, 0(a0) # L : ret (t9) # L0 : Latency=3 nop nop .end stxcpy_aligned .align 4 .ent __stxcpy .globl __stxcpy __stxcpy: .frame sp, 0, t9 .prologue 0 /* Are source and destination co-aligned? */ xor a0, a1, t0 # E : unop # E : and t0, 7, t0 # E : (stall) bne t0, $unaligned # U : (stall) /* We are co-aligned; take care of a partial first word. 
*/ ldq_u t1, 0(a1) # L : load first src word and a0, 7, t0 # E : take care not to load a word ... addq a1, 8, a1 # E : beq t0, stxcpy_aligned # U : ... if we wont need it (stall) ldq_u t0, 0(a0) # L : br stxcpy_aligned # L0 : Latency=3 nop nop /* The source and destination are not co-aligned. Align the destination and cope. We have to be very careful about not reading too much and causing a SEGV. */ .align 4 $u_head: /* We know just enough now to be able to assemble the first full source word. We can still find a zero at the end of it that prevents us from outputting the whole thing. On entry to this basic block: t0 == the first dest word, for masking back in, if needed else 0 t1 == the low bits of the first source word t6 == bytemask that is -1 in dest word bytes */ ldq_u t2, 8(a1) # L : addq a1, 8, a1 # E : extql t1, a1, t1 # U : (stall on a1) extqh t2, a1, t4 # U : (stall on a1) mskql t0, a0, t0 # U : or t1, t4, t1 # E : mskqh t1, a0, t1 # U : (stall on t1) or t0, t1, t1 # E : (stall on t1) or t1, t6, t6 # E : cmpbge zero, t6, t8 # E : (stall) lda t6, -1 # E : for masking just below bne t8, $u_final # U : (stall) mskql t6, a1, t6 # U : mask out the bits we have or t6, t2, t2 # E : already extracted before (stall) cmpbge zero, t2, t8 # E : testing eos (stall) bne t8, $u_late_head_exit # U : (stall) /* Finally, we've got all the stupid leading edge cases taken care of and we can set up to enter the main loop. */ stq_u t1, 0(a0) # L : store first output word addq a0, 8, a0 # E : extql t2, a1, t0 # U : position ho-bits of lo word ldq_u t2, 8(a1) # U : read next high-order source word addq a1, 8, a1 # E : cmpbge zero, t2, t8 # E : (stall for t2) nop # E : bne t8, $u_eos # U : (stall) /* Unaligned copy main loop. In order to avoid reading too much, the loop is structured to detect zeros in aligned source words. This has, unfortunately, effectively pulled half of a loop iteration out into the head and half into the tail, but it does prevent nastiness from accumulating in the very thing we want to run as fast as possible. On entry to this basic block: t0 == the shifted high-order bits from the previous source word t2 == the unshifted current source word We further know that t2 does not contain a null terminator. */ .align 3 $u_loop: extqh t2, a1, t1 # U : extract high bits for current word addq a1, 8, a1 # E : (stall) extql t2, a1, t3 # U : extract low bits for next time (stall) addq a0, 8, a0 # E : or t0, t1, t1 # E : current dst word now complete ldq_u t2, 0(a1) # L : Latency=3 load high word for next time stq_u t1, -8(a0) # L : save the current word (stall) mov t3, t0 # E : cmpbge zero, t2, t8 # E : test new word for eos beq t8, $u_loop # U : (stall) nop nop /* We've found a zero somewhere in the source word we just read. If it resides in the lower half, we have one (probably partial) word to write out, and if it resides in the upper half, we have one full and one partial word left to write out. On entry to this basic block: t0 == the shifted high-order bits from the previous source word t2 == the unshifted current source word. */ $u_eos: extqh t2, a1, t1 # U : or t0, t1, t1 # E : first (partial) source word complete (stall) cmpbge zero, t1, t8 # E : is the null in this first bit? (stall) bne t8, $u_final # U : (stall) $u_late_head_exit: stq_u t1, 0(a0) # L : the null was in the high-order bits addq a0, 8, a0 # E : extql t2, a1, t1 # U : cmpbge zero, t1, t8 # E : (stall) /* Take care of a final (probably partial) result word. 
On entry to this basic block: t1 == assembled source word t8 == cmpbge mask that found the null. */ $u_final: negq t8, t6 # E : isolate low bit set and t6, t8, t12 # E : (stall) and t12, 0x80, t6 # E : avoid dest word load if we can (stall) bne t6, 1f # U : (stall) ldq_u t0, 0(a0) # E : subq t12, 1, t6 # E : or t6, t12, t8 # E : (stall) zapnot t1, t6, t1 # U : kill source bytes >= null (stall) zap t0, t8, t0 # U : kill dest bytes <= null (2 cycle data stall) or t0, t1, t1 # E : (stall) nop nop 1: stq_u t1, 0(a0) # L : ret (t9) # L0 : Latency=3 nop nop /* Unaligned copy entry point. */ .align 4 $unaligned: ldq_u t1, 0(a1) # L : load first source word and a0, 7, t4 # E : find dest misalignment and a1, 7, t5 # E : find src misalignment /* Conditionally load the first destination word and a bytemask with 0xff indicating that the destination byte is sacrosanct. */ mov zero, t0 # E : mov zero, t6 # E : beq t4, 1f # U : ldq_u t0, 0(a0) # L : lda t6, -1 # E : mskql t6, a0, t6 # U : nop nop nop 1: subq a1, t4, a1 # E : sub dest misalignment from src addr /* If source misalignment is larger than dest misalignment, we need extra startup checks to avoid SEGV. */ cmplt t4, t5, t12 # E : beq t12, $u_head # U : lda t2, -1 # E : mask out leading garbage in source mskqh t2, t5, t2 # U : ornot t1, t2, t3 # E : (stall) cmpbge zero, t3, t8 # E : is there a zero? (stall) beq t8, $u_head # U : (stall) /* At this point we've found a zero in the first partial word of the source. We need to isolate the valid source data and mask it into the original destination data. (Incidentally, we know that we'll need at least one byte of that original dest word.) */ ldq_u t0, 0(a0) # L : negq t8, t6 # E : build bitmask of bytes <= zero and t6, t8, t12 # E : (stall) and a1, 7, t5 # E : subq t12, 1, t6 # E : or t6, t12, t8 # E : (stall) srl t12, t5, t12 # U : adjust final null return value zapnot t2, t8, t2 # U : prepare source word; mirror changes (stall) and t1, t2, t1 # E : to source validity mask extql t2, a1, t2 # U : extql t1, a1, t1 # U : (stall) andnot t0, t2, t0 # .. e1 : zero place for source to reside (stall) or t0, t1, t1 # e1 : and put it there stq_u t1, 0(a0) # .. e0 : (stall) ret (t9) # e1 : nop .end __stxcpy
aixcc-public/challenge-001-exemplar-source
1,914
arch/alpha/lib/strncat.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/strncat.S * Contributed by Richard Henderson (rth@tamu.edu) * * Append no more than COUNT characters from the null-terminated string SRC * to the null-terminated string DST. Always null-terminate the new DST. * * This differs slightly from the semantics in libc in that we never write * past count, whereas libc may write to count+1. This follows the generic * implementation in lib/string.c and is, IMHO, more sensible. */ #include <asm/export.h> .text .align 3 .globl strncat .ent strncat strncat: .frame $30, 0, $26 .prologue 0 mov $16, $0 # set up return value beq $18, $zerocount /* Find the end of the string. */ ldq_u $1, 0($16) # load first quadword ($16 may be misaligned) lda $2, -1($31) insqh $2, $16, $2 andnot $16, 7, $16 or $2, $1, $1 cmpbge $31, $1, $2 # bits set iff byte == 0 bne $2, $found $loop: ldq $1, 8($16) addq $16, 8, $16 cmpbge $31, $1, $2 beq $2, $loop $found: negq $2, $3 # clear all but least set bit and $2, $3, $2 and $2, 0xf0, $3 # binary search for that set bit and $2, 0xcc, $4 and $2, 0xaa, $5 cmovne $3, 4, $3 cmovne $4, 2, $4 cmovne $5, 1, $5 addq $3, $4, $3 addq $16, $5, $16 addq $16, $3, $16 /* Now do the append. */ bsr $23, __stxncpy /* Worry about the null termination. */ zapnot $1, $27, $2 # was last byte a null? bne $2, 0f ret 0: cmplt $27, $24, $2 # did we fill the buffer completely? or $2, $18, $2 bne $2, 2f and $24, 0x80, $2 # no zero next byte bne $2, 1f /* Here there are bytes left in the current word. Clear one. */ addq $24, $24, $24 # end-of-count bit <<= 1 2: zap $1, $24, $1 stq_u $1, 0($16) ret 1: /* Here we must read the next DST word and clear the first byte. */ ldq_u $1, 8($16) zap $1, 1, $1 stq_u $1, 8($16) $zerocount: ret .end strncat EXPORT_SYMBOL(strncat)
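After cmpbge produces a mask with one bit per zero byte, the code above isolates the lowest set bit (negate and AND) and then uses the 0xf0/0xcc/0xaa tests with cmovne as a three-step binary search for that bit's position, i.e. the byte offset of the terminating NUL within the quadword. The index computation alone, as an illustrative C sketch (hypothetical name):

#include <stdint.h>

/* Return the position (0..7) of the lowest set bit in a cmpbge-style
 * byte mask; callers add it to the quadword's address to locate the NUL. */
static unsigned lowest_set_byte_index(uint8_t mask)
{
        uint8_t bit = mask & (uint8_t)-mask;    /* isolate lowest set bit */
        unsigned idx = 0;

        if (bit & 0xf0) idx += 4;               /* upper nibble?          */
        if (bit & 0xcc) idx += 2;               /* bits 2,3 or 6,7?       */
        if (bit & 0xaa) idx += 1;               /* an odd bit position?   */
        return idx;
}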
aixcc-public/challenge-001-exemplar-source
10,883
arch/alpha/lib/stxncpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/stxncpy.S * Contributed by Richard Henderson (rth@tamu.edu) * * Copy no more than COUNT bytes of the null-terminated string from * SRC to DST. * * This is an internal routine used by strncpy, stpncpy, and strncat. * As such, it uses special linkage conventions to make implementation * of these public functions more efficient. * * On input: * t9 = return address * a0 = DST * a1 = SRC * a2 = COUNT * * Furthermore, COUNT may not be zero. * * On output: * t0 = last word written * t10 = bitmask (with one bit set) indicating the byte position of * the end of the range specified by COUNT * t12 = bitmask (with one bit set) indicating the last byte written * a0 = unaligned address of the last *word* written * a2 = the number of full words left in COUNT * * Furthermore, v0, a3-a5, t11, and $at are untouched. */ #include <asm/regdef.h> .set noat .set noreorder .text /* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that doesn't like putting the entry point for a procedure somewhere in the middle of the procedure descriptor. Work around this by putting the aligned copy in its own procedure descriptor */ .ent stxncpy_aligned .align 3 stxncpy_aligned: .frame sp, 0, t9, 0 .prologue 0 /* On entry to this basic block: t0 == the first destination word for masking back in t1 == the first source word. */ /* Create the 1st output word and detect 0's in the 1st input word. */ lda t2, -1 # e1 : build a mask against false zero mskqh t2, a1, t2 # e0 : detection in the src word mskqh t1, a1, t3 # e0 : ornot t1, t2, t2 # .. e1 : mskql t0, a1, t0 # e0 : assemble the first output word cmpbge zero, t2, t8 # .. e1 : bits set iff null found or t0, t3, t0 # e0 : beq a2, $a_eoc # .. e1 : bne t8, $a_eos # .. e1 : /* On entry to this basic block: t0 == a source word not containing a null. */ $a_loop: stq_u t0, 0(a0) # e0 : addq a0, 8, a0 # .. e1 : ldq_u t0, 0(a1) # e0 : addq a1, 8, a1 # .. e1 : subq a2, 1, a2 # e0 : cmpbge zero, t0, t8 # .. e1 (stall) beq a2, $a_eoc # e1 : beq t8, $a_loop # e1 : /* Take care of the final (partial) word store. At this point the end-of-count bit is set in t8 iff it applies. On entry to this basic block we have: t0 == the source word containing the null t8 == the cmpbge mask that found it. */ $a_eos: negq t8, t12 # e0 : find low bit set and t8, t12, t12 # e1 (stall) /* For the sake of the cache, don't read a destination word if we're not going to need it. */ and t12, 0x80, t6 # e0 : bne t6, 1f # .. e1 (zdb) /* We're doing a partial word store and so need to combine our source and original destination words. */ ldq_u t1, 0(a0) # e0 : subq t12, 1, t6 # .. e1 : or t12, t6, t8 # e0 : unop # zapnot t0, t8, t0 # e0 : clear src bytes > null zap t1, t8, t1 # .. e1 : clear dst bytes <= null or t0, t1, t0 # e1 : 1: stq_u t0, 0(a0) # e0 : ret (t9) # e1 : /* Add the end-of-count bit to the eos detection bitmask. */ $a_eoc: or t10, t8, t8 br $a_eos .end stxncpy_aligned .align 3 .ent __stxncpy .globl __stxncpy __stxncpy: .frame sp, 0, t9, 0 .prologue 0 /* Are source and destination co-aligned? */ xor a0, a1, t1 # e0 : and a0, 7, t0 # .. e1 : find dest misalignment and t1, 7, t1 # e0 : addq a2, t0, a2 # .. e1 : bias count by dest misalignment subq a2, 1, a2 # e0 : and a2, 7, t2 # e1 : srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8 addq zero, 1, t10 # .. e1 : sll t10, t2, t10 # e0 : t10 = bitmask of last count byte bne t1, $unaligned # .. e1 : /* We are co-aligned; take care of a partial first word. 
*/ ldq_u t1, 0(a1) # e0 : load first src word addq a1, 8, a1 # .. e1 : beq t0, stxncpy_aligned # avoid loading dest word if not needed ldq_u t0, 0(a0) # e0 : br stxncpy_aligned # .. e1 : /* The source and destination are not co-aligned. Align the destination and cope. We have to be very careful about not reading too much and causing a SEGV. */ .align 3 $u_head: /* We know just enough now to be able to assemble the first full source word. We can still find a zero at the end of it that prevents us from outputting the whole thing. On entry to this basic block: t0 == the first dest word, unmasked t1 == the shifted low bits of the first source word t6 == bytemask that is -1 in dest word bytes */ ldq_u t2, 8(a1) # e0 : load second src word addq a1, 8, a1 # .. e1 : mskql t0, a0, t0 # e0 : mask trailing garbage in dst extqh t2, a1, t4 # e0 : or t1, t4, t1 # e1 : first aligned src word complete mskqh t1, a0, t1 # e0 : mask leading garbage in src or t0, t1, t0 # e0 : first output word complete or t0, t6, t6 # e1 : mask original data for zero test cmpbge zero, t6, t8 # e0 : beq a2, $u_eocfin # .. e1 : lda t6, -1 # e0 : bne t8, $u_final # .. e1 : mskql t6, a1, t6 # e0 : mask out bits already seen nop # .. e1 : stq_u t0, 0(a0) # e0 : store first output word or t6, t2, t2 # .. e1 : cmpbge zero, t2, t8 # e0 : find nulls in second partial addq a0, 8, a0 # .. e1 : subq a2, 1, a2 # e0 : bne t8, $u_late_head_exit # .. e1 : /* Finally, we've got all the stupid leading edge cases taken care of and we can set up to enter the main loop. */ extql t2, a1, t1 # e0 : position hi-bits of lo word beq a2, $u_eoc # .. e1 : ldq_u t2, 8(a1) # e0 : read next high-order source word addq a1, 8, a1 # .. e1 : extqh t2, a1, t0 # e0 : position lo-bits of hi word (stall) cmpbge zero, t2, t8 # .. e1 : nop # e0 : bne t8, $u_eos # .. e1 : /* Unaligned copy main loop. In order to avoid reading too much, the loop is structured to detect zeros in aligned source words. This has, unfortunately, effectively pulled half of a loop iteration out into the head and half into the tail, but it does prevent nastiness from accumulating in the very thing we want to run as fast as possible. On entry to this basic block: t0 == the shifted low-order bits from the current source word t1 == the shifted high-order bits from the previous source word t2 == the unshifted current source word We further know that t2 does not contain a null terminator. */ .align 3 $u_loop: or t0, t1, t0 # e0 : current dst word now complete subq a2, 1, a2 # .. e1 : decrement word count stq_u t0, 0(a0) # e0 : save the current word addq a0, 8, a0 # .. e1 : extql t2, a1, t1 # e0 : extract high bits for next time beq a2, $u_eoc # .. e1 : ldq_u t2, 8(a1) # e0 : load high word for next time addq a1, 8, a1 # .. e1 : nop # e0 : cmpbge zero, t2, t8 # e1 : test new word for eos (stall) extqh t2, a1, t0 # e0 : extract low bits for current word beq t8, $u_loop # .. e1 : /* We've found a zero somewhere in the source word we just read. If it resides in the lower half, we have one (probably partial) word to write out, and if it resides in the upper half, we have one full and one partial word left to write out. On entry to this basic block: t0 == the shifted low-order bits from the current source word t1 == the shifted high-order bits from the previous source word t2 == the unshifted current source word. */ $u_eos: or t0, t1, t0 # e0 : first (partial) source word complete nop # .. e1 : cmpbge zero, t0, t8 # e0 : is the null in this first bit? bne t8, $u_final # .. 
e1 (zdb) stq_u t0, 0(a0) # e0 : the null was in the high-order bits addq a0, 8, a0 # .. e1 : subq a2, 1, a2 # e1 : $u_late_head_exit: extql t2, a1, t0 # .. e0 : cmpbge zero, t0, t8 # e0 : or t8, t10, t6 # e1 : cmoveq a2, t6, t8 # e0 : nop # .. e1 : /* Take care of a final (probably partial) result word. On entry to this basic block: t0 == assembled source word t8 == cmpbge mask that found the null. */ $u_final: negq t8, t6 # e0 : isolate low bit set and t6, t8, t12 # e1 : and t12, 0x80, t6 # e0 : avoid dest word load if we can bne t6, 1f # .. e1 (zdb) ldq_u t1, 0(a0) # e0 : subq t12, 1, t6 # .. e1 : or t6, t12, t8 # e0 : zapnot t0, t8, t0 # .. e1 : kill source bytes > null zap t1, t8, t1 # e0 : kill dest bytes <= null or t0, t1, t0 # e1 : 1: stq_u t0, 0(a0) # e0 : ret (t9) # .. e1 : /* Got to end-of-count before end of string. On entry to this basic block: t1 == the shifted high-order bits from the previous source word */ $u_eoc: and a1, 7, t6 # e1 : sll t10, t6, t6 # e0 : and t6, 0xff, t6 # e0 : bne t6, 1f # .. e1 : ldq_u t2, 8(a1) # e0 : load final src word nop # .. e1 : extqh t2, a1, t0 # e0 : extract low bits for last word or t1, t0, t1 # e1 : 1: cmpbge zero, t1, t8 mov t1, t0 $u_eocfin: # end-of-count, final word or t10, t8, t8 br $u_final /* Unaligned copy entry point. */ .align 3 $unaligned: ldq_u t1, 0(a1) # e0 : load first source word and a0, 7, t4 # .. e1 : find dest misalignment and a1, 7, t5 # e0 : find src misalignment /* Conditionally load the first destination word and a bytemask with 0xff indicating that the destination byte is sacrosanct. */ mov zero, t0 # .. e1 : mov zero, t6 # e0 : beq t4, 1f # .. e1 : ldq_u t0, 0(a0) # e0 : lda t6, -1 # .. e1 : mskql t6, a0, t6 # e0 : subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr /* If source misalignment is larger than dest misalignment, we need extra startup checks to avoid SEGV. */ 1: cmplt t4, t5, t12 # e1 : extql t1, a1, t1 # .. e0 : shift src into place lda t2, -1 # e0 : for creating masks later beq t12, $u_head # .. e1 : extql t2, a1, t2 # e0 : cmpbge zero, t1, t8 # .. e1 : is there a zero? andnot t2, t6, t2 # e0 : dest mask for a single word copy or t8, t10, t5 # .. e1 : test for end-of-count too cmpbge zero, t2, t3 # e0 : cmoveq a2, t5, t8 # .. e1 : andnot t8, t3, t8 # e0 : beq t8, $u_head # .. e1 (zdb) /* At this point we've found a zero in the first partial word of the source. We need to isolate the valid source data and mask it into the original destination data. (Incidentally, we know that we'll need at least one byte of that original dest word.) */ ldq_u t0, 0(a0) # e0 : negq t8, t6 # .. e1 : build bitmask of bytes <= zero mskqh t1, t4, t1 # e0 : and t6, t8, t12 # .. e1 : subq t12, 1, t6 # e0 : or t6, t12, t8 # e1 : zapnot t2, t8, t2 # e0 : prepare source word; mirror changes zapnot t1, t8, t1 # .. e1 : to source validity mask andnot t0, t2, t0 # e0 : zero place for source to reside or t0, t1, t0 # e1 : and put it there stq_u t0, 0(a0) # e0 : ret (t9) # .. e1 : .end __stxncpy
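The routine above copies at most COUNT bytes of a NUL-terminated string, detecting the terminator eight bytes at a time with cmpbge. As a plain-C anchor for what that achieves, here is a rough sketch of the visible copy behaviour together with the classic portable has-zero-byte test that stands in for cmpbge; stxncpy_ref and word_has_zero_byte are illustrative names rather than kernel symbols, and the special t0/t8/t10 output linkage is deliberately not modelled.

#include <stdint.h>
#include <stddef.h>

/* Portable stand-in for "cmpbge zero, w": nonzero iff any byte of the
 * 64-bit word is zero (the lowest flagged byte is always a real zero). */
static int word_has_zero_byte(uint64_t w)
{
	return ((w - 0x0101010101010101ULL) & ~w & 0x8080808080808080ULL) != 0;
}

/* Byte-accurate sketch of the copy contract: write at most count bytes
 * of src into dst, stopping after the terminator if it fits.  Returns
 * the index of the NUL if one was written, otherwise count. */
static size_t stxncpy_ref(char *dst, const char *src, size_t count)
{
	size_t n;

	for (n = 0; n < count; n++) {
		dst[n] = src[n];
		if (src[n] == '\0')
			break;
	}
	return n;
}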
aixcc-public/challenge-001-exemplar-source
1,361
arch/alpha/lib/ev67-strlen.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/lib/ev67-strlen.S
 * 21264 version by Rick Gorton <rick.gorton@alpha-processor.com>
 *
 * Finds length of a 0-terminated string.  Optimized for the
 * Alpha architecture:
 *
 *	- memory accessed as aligned quadwords only
 *	- uses cmpbge to compare 8 bytes in parallel
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 * Scheduling notation:
 *	E	- either cluster
 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 */
#include <asm/export.h>
	.set noreorder
	.set noat

	.globl	strlen
	.ent	strlen
	.align 4
strlen:
	ldq_u	$1, 0($16)	# L : load first quadword ($16 may be misaligned)
	lda	$2, -1($31)	# E :
	insqh	$2, $16, $2	# U :
	andnot	$16, 7, $0	# E :

	or	$2, $1, $1	# E :
	cmpbge	$31, $1, $2	# E : $2 <- bitmask: bit i == 1 <==> i-th byte == 0
	nop			# E :
	bne	$2, $found	# U :

$loop:	ldq	$1, 8($0)	# L :
	addq	$0, 8, $0	# E : addr += 8
	cmpbge	$31, $1, $2	# E :
	beq	$2, $loop	# U :

$found:	cttz	$2, $3		# U0 :
	addq	$0, $3, $0	# E :
	subq	$0, $16, $0	# E :
	ret	$31, ($26)	# L0 :

	.end	strlen
	EXPORT_SYMBOL(strlen)
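As a reference point for the scan above, here is a rough portable C version of the same word-at-a-time strlen: an 8-byte load, a has-zero-byte test in place of cmpbge, and a count of trailing zeros in place of cttz. strlen_words is an illustrative name, not a kernel symbol; little-endian byte order is assumed, as on Alpha, and the byte-wise head loop stands in for the in-register masking of leading garbage.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static size_t strlen_words(const char *s)
{
	const uint64_t ones = 0x0101010101010101ULL;
	const uint64_t high = 0x8080808080808080ULL;
	const char *p = s;
	uint64_t w, zmask;

	/* Advance to an 8-byte boundary one byte at a time; the assembly
	 * instead masks the leading bytes of the first aligned quadword. */
	while (((uintptr_t)p & 7) != 0) {
		if (*p == '\0')
			return (size_t)(p - s);
		p++;
	}

	for (;;) {
		memcpy(&w, p, 8);		/* aligned 8-byte load */
		zmask = (w - ones) & ~w & high;	/* lowest set bit marks the
						   first zero byte, if any */
		if (zmask)
			return (size_t)(p - s) +
			       (size_t)(__builtin_ctzll(zmask) >> 3);
		p += 8;
	}
}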
aixcc-public/challenge-001-exemplar-source
11,554
arch/alpha/lib/ev6-stxncpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-stxncpy.S * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com> * * Copy no more than COUNT bytes of the null-terminated string from * SRC to DST. * * This is an internal routine used by strncpy, stpncpy, and strncat. * As such, it uses special linkage conventions to make implementation * of these public functions more efficient. * * On input: * t9 = return address * a0 = DST * a1 = SRC * a2 = COUNT * * Furthermore, COUNT may not be zero. * * On output: * t0 = last word written * t10 = bitmask (with one bit set) indicating the byte position of * the end of the range specified by COUNT * t12 = bitmask (with one bit set) indicating the last byte written * a0 = unaligned address of the last *word* written * a2 = the number of full words left in COUNT * * Furthermore, v0, a3-a5, t11, and $at are untouched. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/regdef.h> .set noat .set noreorder .text /* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that doesn't like putting the entry point for a procedure somewhere in the middle of the procedure descriptor. Work around this by putting the aligned copy in its own procedure descriptor */ .ent stxncpy_aligned .align 4 stxncpy_aligned: .frame sp, 0, t9, 0 .prologue 0 /* On entry to this basic block: t0 == the first destination word for masking back in t1 == the first source word. */ /* Create the 1st output word and detect 0's in the 1st input word. */ lda t2, -1 # E : build a mask against false zero mskqh t2, a1, t2 # U : detection in the src word (stall) mskqh t1, a1, t3 # U : ornot t1, t2, t2 # E : (stall) mskql t0, a1, t0 # U : assemble the first output word cmpbge zero, t2, t8 # E : bits set iff null found or t0, t3, t0 # E : (stall) beq a2, $a_eoc # U : bne t8, $a_eos # U : nop nop nop /* On entry to this basic block: t0 == a source word not containing a null. */ /* * nops here to: * separate store quads from load quads * limit of 1 bcond/quad to permit training */ $a_loop: stq_u t0, 0(a0) # L : addq a0, 8, a0 # E : subq a2, 1, a2 # E : nop ldq_u t0, 0(a1) # L : addq a1, 8, a1 # E : cmpbge zero, t0, t8 # E : beq a2, $a_eoc # U : beq t8, $a_loop # U : nop nop nop /* Take care of the final (partial) word store. At this point the end-of-count bit is set in t8 iff it applies. On entry to this basic block we have: t0 == the source word containing the null t8 == the cmpbge mask that found it. */ $a_eos: negq t8, t12 # E : find low bit set and t8, t12, t12 # E : (stall) /* For the sake of the cache, don't read a destination word if we're not going to need it. */ and t12, 0x80, t6 # E : (stall) bne t6, 1f # U : (stall) /* We're doing a partial word store and so need to combine our source and original destination words. */ ldq_u t1, 0(a0) # L : subq t12, 1, t6 # E : or t12, t6, t8 # E : (stall) zapnot t0, t8, t0 # U : clear src bytes > null (stall) zap t1, t8, t1 # .. 
e1 : clear dst bytes <= null or t0, t1, t0 # e1 : (stall) nop nop 1: stq_u t0, 0(a0) # L : ret (t9) # L0 : Latency=3 nop nop /* Add the end-of-count bit to the eos detection bitmask. */ $a_eoc: or t10, t8, t8 # E : br $a_eos # L0 : Latency=3 nop nop .end stxncpy_aligned .align 4 .ent __stxncpy .globl __stxncpy __stxncpy: .frame sp, 0, t9, 0 .prologue 0 /* Are source and destination co-aligned? */ xor a0, a1, t1 # E : and a0, 7, t0 # E : find dest misalignment and t1, 7, t1 # E : (stall) addq a2, t0, a2 # E : bias count by dest misalignment (stall) subq a2, 1, a2 # E : and a2, 7, t2 # E : (stall) srl a2, 3, a2 # U : a2 = loop counter = (count - 1)/8 (stall) addq zero, 1, t10 # E : sll t10, t2, t10 # U : t10 = bitmask of last count byte bne t1, $unaligned # U : /* We are co-aligned; take care of a partial first word. */ ldq_u t1, 0(a1) # L : load first src word addq a1, 8, a1 # E : beq t0, stxncpy_aligned # U : avoid loading dest word if not needed ldq_u t0, 0(a0) # L : nop nop br stxncpy_aligned # .. e1 : nop nop nop /* The source and destination are not co-aligned. Align the destination and cope. We have to be very careful about not reading too much and causing a SEGV. */ .align 4 $u_head: /* We know just enough now to be able to assemble the first full source word. We can still find a zero at the end of it that prevents us from outputting the whole thing. On entry to this basic block: t0 == the first dest word, unmasked t1 == the shifted low bits of the first source word t6 == bytemask that is -1 in dest word bytes */ ldq_u t2, 8(a1) # L : Latency=3 load second src word addq a1, 8, a1 # E : mskql t0, a0, t0 # U : mask trailing garbage in dst extqh t2, a1, t4 # U : (3 cycle stall on t2) or t1, t4, t1 # E : first aligned src word complete (stall) mskqh t1, a0, t1 # U : mask leading garbage in src (stall) or t0, t1, t0 # E : first output word complete (stall) or t0, t6, t6 # E : mask original data for zero test (stall) cmpbge zero, t6, t8 # E : beq a2, $u_eocfin # U : lda t6, -1 # E : nop bne t8, $u_final # U : mskql t6, a1, t6 # U : mask out bits already seen stq_u t0, 0(a0) # L : store first output word or t6, t2, t2 # E : (stall) cmpbge zero, t2, t8 # E : find nulls in second partial addq a0, 8, a0 # E : subq a2, 1, a2 # E : bne t8, $u_late_head_exit # U : /* Finally, we've got all the stupid leading edge cases taken care of and we can set up to enter the main loop. */ extql t2, a1, t1 # U : position hi-bits of lo word beq a2, $u_eoc # U : ldq_u t2, 8(a1) # L : read next high-order source word addq a1, 8, a1 # E : extqh t2, a1, t0 # U : position lo-bits of hi word (stall) cmpbge zero, t2, t8 # E : nop bne t8, $u_eos # U : /* Unaligned copy main loop. In order to avoid reading too much, the loop is structured to detect zeros in aligned source words. This has, unfortunately, effectively pulled half of a loop iteration out into the head and half into the tail, but it does prevent nastiness from accumulating in the very thing we want to run as fast as possible. On entry to this basic block: t0 == the shifted low-order bits from the current source word t1 == the shifted high-order bits from the previous source word t2 == the unshifted current source word We further know that t2 does not contain a null terminator. 
*/ .align 4 $u_loop: or t0, t1, t0 # E : current dst word now complete subq a2, 1, a2 # E : decrement word count extql t2, a1, t1 # U : extract low bits for next time addq a0, 8, a0 # E : stq_u t0, -8(a0) # U : save the current word beq a2, $u_eoc # U : ldq_u t2, 8(a1) # U : Latency=3 load high word for next time addq a1, 8, a1 # E : extqh t2, a1, t0 # U : extract low bits (2 cycle stall) cmpbge zero, t2, t8 # E : test new word for eos nop beq t8, $u_loop # U : /* We've found a zero somewhere in the source word we just read. If it resides in the lower half, we have one (probably partial) word to write out, and if it resides in the upper half, we have one full and one partial word left to write out. On entry to this basic block: t0 == the shifted low-order bits from the current source word t1 == the shifted high-order bits from the previous source word t2 == the unshifted current source word. */ $u_eos: or t0, t1, t0 # E : first (partial) source word complete nop cmpbge zero, t0, t8 # E : is the null in this first bit? (stall) bne t8, $u_final # U : (stall) stq_u t0, 0(a0) # L : the null was in the high-order bits addq a0, 8, a0 # E : subq a2, 1, a2 # E : nop $u_late_head_exit: extql t2, a1, t0 # U : cmpbge zero, t0, t8 # E : or t8, t10, t6 # E : (stall) cmoveq a2, t6, t8 # E : Latency=2, extra map slot (stall) /* Take care of a final (probably partial) result word. On entry to this basic block: t0 == assembled source word t8 == cmpbge mask that found the null. */ $u_final: negq t8, t6 # E : isolate low bit set and t6, t8, t12 # E : (stall) and t12, 0x80, t6 # E : avoid dest word load if we can (stall) bne t6, 1f # U : (stall) ldq_u t1, 0(a0) # L : subq t12, 1, t6 # E : or t6, t12, t8 # E : (stall) zapnot t0, t8, t0 # U : kill source bytes > null zap t1, t8, t1 # U : kill dest bytes <= null or t0, t1, t0 # E : (stall) nop nop 1: stq_u t0, 0(a0) # L : ret (t9) # L0 : Latency=3 /* Got to end-of-count before end of string. On entry to this basic block: t1 == the shifted high-order bits from the previous source word */ $u_eoc: and a1, 7, t6 # E : avoid final load if possible sll t10, t6, t6 # U : (stall) and t6, 0xff, t6 # E : (stall) bne t6, 1f # U : (stall) ldq_u t2, 8(a1) # L : load final src word nop extqh t2, a1, t0 # U : extract low bits for last word (stall) or t1, t0, t1 # E : (stall) 1: cmpbge zero, t1, t8 # E : mov t1, t0 # E : $u_eocfin: # end-of-count, final word or t10, t8, t8 # E : br $u_final # L0 : Latency=3 /* Unaligned copy entry point. */ .align 4 $unaligned: ldq_u t1, 0(a1) # L : load first source word and a0, 7, t4 # E : find dest misalignment and a1, 7, t5 # E : find src misalignment /* Conditionally load the first destination word and a bytemask with 0xff indicating that the destination byte is sacrosanct. */ mov zero, t0 # E : mov zero, t6 # E : beq t4, 1f # U : ldq_u t0, 0(a0) # L : lda t6, -1 # E : mskql t6, a0, t6 # U : nop nop subq a1, t4, a1 # E : sub dest misalignment from src addr /* If source misalignment is larger than dest misalignment, we need extra startup checks to avoid SEGV. */ 1: cmplt t4, t5, t12 # E : extql t1, a1, t1 # U : shift src into place lda t2, -1 # E : for creating masks later beq t12, $u_head # U : (stall) extql t2, a1, t2 # U : cmpbge zero, t1, t8 # E : is there a zero? 
andnot t2, t6, t2 # E : dest mask for a single word copy or t8, t10, t5 # E : test for end-of-count too cmpbge zero, t2, t3 # E : cmoveq a2, t5, t8 # E : Latency=2, extra map slot nop # E : keep with cmoveq andnot t8, t3, t8 # E : (stall) beq t8, $u_head # U : /* At this point we've found a zero in the first partial word of the source. We need to isolate the valid source data and mask it into the original destination data. (Incidentally, we know that we'll need at least one byte of that original dest word.) */ ldq_u t0, 0(a0) # L : negq t8, t6 # E : build bitmask of bytes <= zero mskqh t1, t4, t1 # U : and t6, t8, t12 # E : subq t12, 1, t6 # E : (stall) or t6, t12, t8 # E : (stall) zapnot t2, t8, t2 # U : prepare source word; mirror changes (stall) zapnot t1, t8, t1 # U : to source validity mask andnot t0, t2, t0 # E : zero place for source to reside or t0, t1, t0 # E : and put it there (stall both t0, t1) stq_u t0, 0(a0) # L : (stall) ret (t9) # L0 : Latency=3 nop nop nop .end __stxncpy
aixcc-public/challenge-001-exemplar-source
2,735
arch/alpha/lib/memmove.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/memmove.S * * Barely optimized memmove routine for Alpha EV5. * * This is hand-massaged output from the original memcpy.c. We defer to * memcpy whenever possible; the backwards copy loops are not unrolled. */ #include <asm/export.h> .set noat .set noreorder .text .align 4 .globl memmove .ent memmove memmove: ldgp $29, 0($27) unop nop .prologue 1 addq $16,$18,$4 addq $17,$18,$5 cmpule $4,$17,$1 /* dest + n <= src */ cmpule $5,$16,$2 /* dest >= src + n */ bis $1,$2,$1 mov $16,$0 xor $16,$17,$2 bne $1,memcpy !samegp and $2,7,$2 /* Test for src/dest co-alignment. */ and $16,7,$1 cmpule $16,$17,$3 bne $3,$memmove_up /* dest < src */ and $4,7,$1 bne $2,$misaligned_dn unop beq $1,$skip_aligned_byte_loop_head_dn $aligned_byte_loop_head_dn: lda $4,-1($4) lda $5,-1($5) unop ble $18,$egress ldq_u $3,0($5) ldq_u $2,0($4) lda $18,-1($18) extbl $3,$5,$1 insbl $1,$4,$1 mskbl $2,$4,$2 bis $1,$2,$1 and $4,7,$6 stq_u $1,0($4) bne $6,$aligned_byte_loop_head_dn $skip_aligned_byte_loop_head_dn: lda $18,-8($18) blt $18,$skip_aligned_word_loop_dn $aligned_word_loop_dn: ldq $1,-8($5) nop lda $5,-8($5) lda $18,-8($18) stq $1,-8($4) nop lda $4,-8($4) bge $18,$aligned_word_loop_dn $skip_aligned_word_loop_dn: lda $18,8($18) bgt $18,$byte_loop_tail_dn unop ret $31,($26),1 .align 4 $misaligned_dn: nop fnop unop beq $18,$egress $byte_loop_tail_dn: ldq_u $3,-1($5) ldq_u $2,-1($4) lda $5,-1($5) lda $4,-1($4) lda $18,-1($18) extbl $3,$5,$1 insbl $1,$4,$1 mskbl $2,$4,$2 bis $1,$2,$1 stq_u $1,0($4) bgt $18,$byte_loop_tail_dn br $egress $memmove_up: mov $16,$4 mov $17,$5 bne $2,$misaligned_up beq $1,$skip_aligned_byte_loop_head_up $aligned_byte_loop_head_up: unop ble $18,$egress ldq_u $3,0($5) ldq_u $2,0($4) lda $18,-1($18) extbl $3,$5,$1 insbl $1,$4,$1 mskbl $2,$4,$2 bis $1,$2,$1 lda $5,1($5) stq_u $1,0($4) lda $4,1($4) and $4,7,$6 bne $6,$aligned_byte_loop_head_up $skip_aligned_byte_loop_head_up: lda $18,-8($18) blt $18,$skip_aligned_word_loop_up $aligned_word_loop_up: ldq $1,0($5) nop lda $5,8($5) lda $18,-8($18) stq $1,0($4) nop lda $4,8($4) bge $18,$aligned_word_loop_up $skip_aligned_word_loop_up: lda $18,8($18) bgt $18,$byte_loop_tail_up unop ret $31,($26),1 .align 4 $misaligned_up: nop fnop unop beq $18,$egress $byte_loop_tail_up: ldq_u $3,0($5) ldq_u $2,0($4) lda $18,-1($18) extbl $3,$5,$1 insbl $1,$4,$1 mskbl $2,$4,$2 bis $1,$2,$1 stq_u $1,0($4) lda $5,1($5) lda $4,1($4) nop bgt $18,$byte_loop_tail_up $egress: ret $31,($26),1 nop nop nop .end memmove EXPORT_SYMBOL(memmove)
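The entry sequence above is essentially an overlap test: if the two ranges are disjoint it tail-calls memcpy, otherwise it copies in whichever direction is safe. A small C sketch of that dispatch follows; memmove_ref is an illustrative name, and the pointer comparisons mirror the two cmpule tests in the assembly.

#include <stddef.h>
#include <string.h>

static void *memmove_ref(void *dstv, const void *srcv, size_t n)
{
	unsigned char *dst = dstv;
	const unsigned char *src = srcv;

	if (dst + n <= src || src + n <= dst) {
		memcpy(dst, src, n);	/* disjoint: plain forward copy */
	} else if (dst < src) {
		while (n--)		/* overlapping, dst below src */
			*dst++ = *src++;
	} else {
		while (n--)		/* overlapping, dst above src:
					   copy backwards */
			dst[n] = src[n];
	}
	return dstv;
}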
aixcc-public/challenge-001-exemplar-source
1,627
arch/alpha/lib/ev67-strcat.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/lib/ev67-strcat.S
 * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
 *
 * Append a null-terminated string from SRC to DST.
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 * Scheduling notation:
 *	E	- either cluster
 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 * Try not to change the actual algorithm if possible for consistency.
 * Commentary: It seems bogus to walk the input string twice - once
 * to determine the length, and then again while doing the copy.
 * A significant (future) enhancement would be to only read the input
 * string once.
 */

#include <asm/export.h>
	.text

	.align 4
	.globl	strcat
	.ent	strcat
strcat:
	.frame $30, 0, $26
	.prologue 0

	mov	$16, $0		# E : set up return value

	/* Find the end of the string.  */
	ldq_u	$1, 0($16)	# L : load first quadword (a0 may be misaligned)
	lda	$2, -1		# E :
	insqh	$2, $16, $2	# U :
	andnot	$16, 7, $16	# E :
	or	$2, $1, $1	# E :
	cmpbge	$31, $1, $2	# E : bits set iff byte == 0
	bne	$2, $found	# U :

$loop:	ldq	$1, 8($16)	# L :
	addq	$16, 8, $16	# E :
	cmpbge	$31, $1, $2	# E :
	beq	$2, $loop	# U :

$found:	cttz	$2, $3		# U0 :
	addq	$16, $3, $16	# E :

	/* Now do the append.  */
	mov	$26, $23	# E :
	br	__stxcpy	# L0 :

	.end	strcat
	EXPORT_SYMBOL(strcat)
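The commentary above points out that the routine walks the input twice: one cmpbge/cttz scan to find the terminator of DST, then a jump to __stxcpy for the copy. In C the same two-pass shape is simply the following; strcat_ref is an illustrative name, not a kernel symbol.

#include <string.h>

static char *strcat_ref(char *dst, const char *src)
{
	char *end = dst + strlen(dst);	/* pass 1: locate the NUL */

	strcpy(end, src);		/* pass 2: append (the assembly
					   delegates this to __stxcpy) */
	return dst;			/* original dst is the return value */
}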
aixcc-public/challenge-001-exemplar-source
5,444
arch/alpha/lib/ev6-memchr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-memchr.S * * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Finds characters in a memory area. Optimized for the Alpha: * * - memory accessed as aligned quadwords only * - uses cmpbge to compare 8 bytes in parallel * - does binary search to find 0 byte in last * quadword (HAKMEM needed 12 instructions to * do this instead of the 9 instructions that * binary search needs). * * For correctness consider that: * * - only minimum number of quadwords may be accessed * - the third argument is an unsigned long * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/export.h> .set noreorder .set noat .align 4 .globl memchr .ent memchr memchr: .frame $30,0,$26,0 .prologue 0 # Hack -- if someone passes in (size_t)-1, hoping to just # search til the end of the address space, we will overflow # below when we find the address of the last byte. Given # that we will never have a 56-bit address space, cropping # the length is the easiest way to avoid trouble. zap $18, 0x80, $5 # U : Bound length beq $18, $not_found # U : ldq_u $1, 0($16) # L : load first quadword Latency=3 and $17, 0xff, $17 # E : L L U U : 00000000000000ch insbl $17, 1, $2 # U : 000000000000ch00 cmpult $18, 9, $4 # E : small (< 1 quad) string? or $2, $17, $17 # E : 000000000000chch lda $3, -1($31) # E : U L L U sll $17, 16, $2 # U : 00000000chch0000 addq $16, $5, $5 # E : Max search address or $2, $17, $17 # E : 00000000chchchch sll $17, 32, $2 # U : U L L U : chchchch00000000 or $2, $17, $17 # E : chchchchchchchch extql $1, $16, $7 # U : $7 is upper bits beq $4, $first_quad # U : ldq_u $6, -1($5) # L : L U U L : eight or less bytes to search Latency=3 extqh $6, $16, $6 # U : 2 cycle stall for $6 mov $16, $0 # E : nop # E : or $7, $6, $1 # E : L U L U $1 = quadword starting at $16 # Deal with the case where at most 8 bytes remain to be searched # in $1. E.g.: # $18 = 6 # $1 = ????c6c5c4c3c2c1 $last_quad: negq $18, $6 # E : xor $17, $1, $1 # E : srl $3, $6, $6 # U : $6 = mask of $18 bits set cmpbge $31, $1, $2 # E : L U L U nop nop and $2, $6, $2 # E : beq $2, $not_found # U : U L U L $found_it: #ifdef CONFIG_ALPHA_EV67 /* * Since we are guaranteed to have set one of the bits, we don't * have to worry about coming back with a 0x40 out of cttz... */ cttz $2, $3 # U0 : addq $0, $3, $0 # E : All done nop # E : ret # L0 : L U L U #else /* * Slow and clunky. It can probably be improved. * An exercise left for others. */ negq $2, $3 # E : and $2, $3, $2 # E : and $2, 0x0f, $1 # E : addq $0, 4, $3 # E : cmoveq $1, $3, $0 # E : Latency 2, extra map cycle nop # E : keep with cmov and $2, 0x33, $1 # E : addq $0, 2, $3 # E : U L U L : 2 cycle stall on $0 cmoveq $1, $3, $0 # E : Latency 2, extra map cycle nop # E : keep with cmov and $2, 0x55, $1 # E : addq $0, 1, $3 # E : U L U L : 2 cycle stall on $0 cmoveq $1, $3, $0 # E : Latency 2, extra map cycle nop nop ret # L0 : L U L U #endif # Deal with the case where $18 > 8 bytes remain to be # searched. $16 may not be aligned. 
.align 4 $first_quad: andnot $16, 0x7, $0 # E : insqh $3, $16, $2 # U : $2 = 0000ffffffffffff ($16<0:2> ff) xor $1, $17, $1 # E : or $1, $2, $1 # E : U L U L $1 = ====ffffffffffff cmpbge $31, $1, $2 # E : bne $2, $found_it # U : # At least one byte left to process. ldq $1, 8($0) # L : subq $5, 1, $18 # E : U L U L addq $0, 8, $0 # E : # Make $18 point to last quad to be accessed (the # last quad may or may not be partial). andnot $18, 0x7, $18 # E : cmpult $0, $18, $2 # E : beq $2, $final # U : U L U L # At least two quads remain to be accessed. subq $18, $0, $4 # E : $4 <- nr quads to be processed and $4, 8, $4 # E : odd number of quads? bne $4, $odd_quad_count # U : # At least three quads remain to be accessed mov $1, $4 # E : L U L U : move prefetched value to correct reg .align 4 $unrolled_loop: ldq $1, 8($0) # L : prefetch $1 xor $17, $4, $2 # E : cmpbge $31, $2, $2 # E : bne $2, $found_it # U : U L U L addq $0, 8, $0 # E : nop # E : nop # E : nop # E : $odd_quad_count: xor $17, $1, $2 # E : ldq $4, 8($0) # L : prefetch $4 cmpbge $31, $2, $2 # E : addq $0, 8, $6 # E : bne $2, $found_it # U : cmpult $6, $18, $6 # E : addq $0, 8, $0 # E : nop # E : bne $6, $unrolled_loop # U : mov $4, $1 # E : move prefetched value into $1 nop # E : nop # E : $final: subq $5, $0, $18 # E : $18 <- number of bytes left to do nop # E : nop # E : bne $18, $last_quad # U : $not_found: mov $31, $0 # E : nop # E : nop # E : ret # L0 : .end memchr EXPORT_SYMBOL(memchr)
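The #else branch above ("slow and clunky") turns the cmpbge match mask into a byte index without cttz: isolate the lowest set bit, then binary-search its position with the constants 0x0f, 0x33 and 0x55. A direct C transcription of just that step; first_match_index is an illustrative name, the mask has one bit per byte of the quadword and is assumed nonzero, as it is at $found_it.

static unsigned first_match_index(unsigned mask8)
{
	unsigned bit = mask8 & -mask8;	/* isolate the lowest set bit */
	unsigned idx = 0;

	if ((bit & 0x0f) == 0)		/* bit lies in byte 4..7 */
		idx += 4;
	if ((bit & 0x33) == 0)		/* bit lies in byte {2,3,6,7} */
		idx += 2;
	if ((bit & 0x55) == 0)		/* bit lies in an odd byte */
		idx += 1;
	return idx;			/* byte offset 0..7 in the quadword */
}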
aixcc-public/challenge-001-exemplar-source
3,198
arch/alpha/lib/memset.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/alpha/lib/memset.S * * This is an efficient (and small) implementation of the C library "memset()" * function for the alpha. * * (C) Copyright 1996 Linus Torvalds * * This routine is "moral-ware": you are free to use it any way you wish, and * the only obligation I put on you is a moral one: if you make any improvements * to the routine, please send me your improvements for me to use similarly. * * The scheduling comments are according to the EV5 documentation (and done by * hand, so they might well be incorrect, please do tell me about it..) */ #include <asm/export.h> .set noat .set noreorder .text .globl memset .globl __memset .globl ___memset .globl __memset16 .globl __constant_c_memset .ent ___memset .align 5 ___memset: .frame $30,0,$26,0 .prologue 0 and $17,255,$1 /* E1 */ insbl $17,1,$17 /* .. E0 */ bis $17,$1,$17 /* E0 (p-c latency, next cycle) */ sll $17,16,$1 /* E1 (p-c latency, next cycle) */ bis $17,$1,$17 /* E0 (p-c latency, next cycle) */ sll $17,32,$1 /* E1 (p-c latency, next cycle) */ bis $17,$1,$17 /* E0 (p-c latency, next cycle) */ ldq_u $31,0($30) /* .. E1 */ .align 5 __constant_c_memset: addq $18,$16,$6 /* E0 */ bis $16,$16,$0 /* .. E1 */ xor $16,$6,$1 /* E0 */ ble $18,end /* .. E1 */ bic $1,7,$1 /* E0 */ beq $1,within_one_quad /* .. E1 (note EV5 zero-latency forwarding) */ and $16,7,$3 /* E0 */ beq $3,aligned /* .. E1 (note EV5 zero-latency forwarding) */ ldq_u $4,0($16) /* E0 */ bis $16,$16,$5 /* .. E1 */ insql $17,$16,$2 /* E0 */ subq $3,8,$3 /* .. E1 */ addq $18,$3,$18 /* E0 $18 is new count ($3 is negative) */ mskql $4,$16,$4 /* .. E1 (and possible load stall) */ subq $16,$3,$16 /* E0 $16 is new aligned destination */ bis $2,$4,$1 /* .. E1 */ bis $31,$31,$31 /* E0 */ ldq_u $31,0($30) /* .. E1 */ stq_u $1,0($5) /* E0 */ bis $31,$31,$31 /* .. E1 */ .align 4 aligned: sra $18,3,$3 /* E0 */ and $18,7,$18 /* .. E1 */ bis $16,$16,$5 /* E0 */ beq $3,no_quad /* .. E1 */ .align 3 loop: stq $17,0($5) /* E0 */ subq $3,1,$3 /* .. E1 */ addq $5,8,$5 /* E0 */ bne $3,loop /* .. E1 */ no_quad: bis $31,$31,$31 /* E0 */ beq $18,end /* .. E1 */ ldq $7,0($5) /* E0 */ mskqh $7,$6,$2 /* .. E1 (and load stall) */ insqh $17,$6,$4 /* E0 */ bis $2,$4,$1 /* .. E1 */ stq $1,0($5) /* E0 */ ret $31,($26),1 /* .. E1 */ .align 3 within_one_quad: ldq_u $1,0($16) /* E0 */ insql $17,$16,$2 /* E1 */ mskql $1,$16,$4 /* E0 (after load stall) */ bis $2,$4,$2 /* E0 */ mskql $2,$6,$4 /* E0 */ mskqh $1,$6,$2 /* .. E1 */ bis $2,$4,$1 /* E0 */ stq_u $1,0($16) /* E0 */ end: ret $31,($26),1 /* E1 */ .end ___memset EXPORT_SYMBOL(___memset) EXPORT_SYMBOL(__constant_c_memset) .align 5 .ent __memset16 __memset16: .prologue 0 inswl $17,0,$1 /* E0 */ inswl $17,2,$2 /* E0 */ inswl $17,4,$3 /* E0 */ or $1,$2,$1 /* .. E1 */ inswl $17,6,$4 /* E0 */ or $1,$3,$1 /* .. E1 */ or $1,$4,$17 /* E0 */ br __constant_c_memset /* .. E1 */ .end __memset16 EXPORT_SYMBOL(__memset16) memset = ___memset __memset = ___memset EXPORT_SYMBOL(memset) EXPORT_SYMBOL(__memset)
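The first few instructions of ___memset widen the fill byte into a full quadword with three shift/or steps, and __memset16 does the same for a 16-bit pattern using inswl. Portable equivalents of just that widening step follow; the function names are illustrative, not kernel symbols.

#include <stdint.h>

static uint64_t replicate_byte(uint8_t c)
{
	uint64_t v = c;

	v |= v << 8;	/* 00000000 0000chch */
	v |= v << 16;	/* 00000000 chchchch */
	v |= v << 32;	/* chchchch chchchch */
	return v;	/* stored eight bytes at a time by the main loop */
}

static uint64_t replicate_u16(uint16_t c)
{
	uint64_t v = c;

	v |= v << 16;
	v |= v << 32;
	return v;
}

Multiplying by 0x0101010101010101 would produce the same byte pattern in one step; the shift/or form is shown because it maps one-for-one onto the instructions used here.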
aixcc-public/challenge-001-exemplar-source
5,302
arch/alpha/lib/ev6-csum_ipv6_magic.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-csum_ipv6_magic.S * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * unsigned short csum_ipv6_magic(struct in6_addr *saddr, * struct in6_addr *daddr, * __u32 len, * unsigned short proto, * unsigned int csum); * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. * Determining actual stalls (other than slotting) doesn't appear to be easy to do. * * unsigned short csum_ipv6_magic(struct in6_addr *saddr, * struct in6_addr *daddr, * __u32 len, * unsigned short proto, * unsigned int csum); * * Swap <proto> (takes form 0xaabb) * Then shift it left by 48, so result is: * 0xbbaa0000 00000000 * Then turn it back into a sign extended 32-bit item * 0xbbaa0000 * * Swap <len> (an unsigned int) using Mike Burrows' 7-instruction sequence * (we can't hide the 3-cycle latency of the unpkbw in the 6-instruction sequence) * Assume input takes form 0xAABBCCDD * * Finally, original 'folding' approach is to split the long into 4 unsigned shorts * add 4 ushorts, resulting in ushort/carry * add carry bits + ushort --> ushort * add carry bits + ushort --> ushort (in case the carry results in an overflow) * Truncate to a ushort. (took 13 instructions) * From doing some testing, using the approach in checksum.c:from64to16() * results in the same outcome: * split into 2 uints, add those, generating a ulong * add the 3 low ushorts together, generating a uint * a final add of the 2 lower ushorts * truncating the result. * * Misalignment handling added by Ivan Kokshaysky <ink@jurassic.park.msu.ru> * The cost is 16 instructions (~8 cycles), including two extra loads which * may cause additional delay in rare cases (load-load replay traps). */ #include <asm/export.h> .globl csum_ipv6_magic .align 4 .ent csum_ipv6_magic .frame $30,0,$26,0 csum_ipv6_magic: .prologue 0 ldq_u $0,0($16) # L : Latency: 3 inslh $18,7,$4 # U : 0000000000AABBCC ldq_u $1,8($16) # L : Latency: 3 sll $19,8,$7 # U : U L U L : 0x00000000 00aabb00 and $16,7,$6 # E : src misalignment ldq_u $5,15($16) # L : Latency: 3 zapnot $20,15,$20 # U : zero extend incoming csum ldq_u $2,0($17) # L : U L U L : Latency: 3 extql $0,$6,$0 # U : extqh $1,$6,$22 # U : ldq_u $3,8($17) # L : Latency: 3 sll $19,24,$19 # U : U U L U : 0x000000aa bb000000 cmoveq $6,$31,$22 # E : src aligned? ldq_u $23,15($17) # L : Latency: 3 inswl $18,3,$18 # U : 000000CCDD000000 addl $19,$7,$19 # E : U L U L : <sign bits>bbaabb00 or $0,$22,$0 # E : 1st src word complete extql $1,$6,$1 # U : or $18,$4,$18 # E : 000000CCDDAABBCC extqh $5,$6,$5 # U : L U L U and $17,7,$6 # E : dst misalignment extql $2,$6,$2 # U : or $1,$5,$1 # E : 2nd src word complete extqh $3,$6,$22 # U : L U L U : cmoveq $6,$31,$22 # E : dst aligned? 
extql $3,$6,$3 # U : addq $20,$0,$20 # E : begin summing the words extqh $23,$6,$23 # U : L U L U : srl $18,16,$4 # U : 0000000000CCDDAA or $2,$22,$2 # E : 1st dst word complete zap $19,0x3,$19 # U : <sign bits>bbaa0000 or $3,$23,$3 # E : U L U L : 2nd dst word complete cmpult $20,$0,$0 # E : addq $20,$1,$20 # E : zapnot $18,0xa,$18 # U : 00000000DD00BB00 zap $4,0xa,$4 # U : U U L L : 0000000000CC00AA or $18,$4,$18 # E : 00000000DDCCBBAA nop # E : cmpult $20,$1,$1 # E : addq $20,$2,$20 # E : U L U L cmpult $20,$2,$2 # E : addq $20,$3,$20 # E : cmpult $20,$3,$3 # E : (1 cycle stall on $20) addq $20,$18,$20 # E : U L U L (1 cycle stall on $20) cmpult $20,$18,$18 # E : addq $20,$19,$20 # E : (1 cycle stall on $20) addq $0,$1,$0 # E : merge the carries back into the csum addq $2,$3,$2 # E : cmpult $20,$19,$19 # E : addq $18,$19,$18 # E : (1 cycle stall on $19) addq $0,$2,$0 # E : addq $20,$18,$20 # E : U L U L : /* (1 cycle stall on $18, 2 cycles on $20) */ addq $0,$20,$0 # E : zapnot $0,15,$1 # U : Start folding output (1 cycle stall on $0) nop # E : srl $0,32,$0 # U : U L U L : (1 cycle stall on $0) addq $1,$0,$1 # E : Finished generating ulong extwl $1,2,$2 # U : ushort[1] (1 cycle stall on $1) zapnot $1,3,$0 # U : ushort[0] (1 cycle stall on $1) extwl $1,4,$1 # U : ushort[2] (1 cycle stall on $1) addq $0,$2,$0 # E addq $0,$1,$3 # E : Finished generating uint /* (1 cycle stall on $0) */ extwl $3,2,$1 # U : ushort[1] (1 cycle stall on $3) nop # E : L U L U addq $1,$3,$0 # E : Final carry not $0,$4 # E : complement (1 cycle stall on $0) zapnot $4,3,$0 # U : clear upper garbage bits /* (1 cycle stall on $4) */ ret # L0 : L U L U .end csum_ipv6_magic EXPORT_SYMBOL(csum_ipv6_magic)
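The header comment describes the final reduction as the checksum.c:from64to16() approach: add the two 32-bit halves, add the three low 16-bit chunks, then fold any remaining carry and complement. A hedged C sketch of that reduction only; fold64_to_csum16 is an illustrative name, and the scheduled assembly interleaves this with the summation itself.

#include <stdint.h>

static uint16_t fold64_to_csum16(uint64_t sum)
{
	/* 64 -> 33 bits: add the two 32-bit halves. */
	sum = (sum & 0xffffffffULL) + (sum >> 32);
	/* 33 -> roughly 17 bits: add the three low 16-bit chunks. */
	sum = (sum & 0xffff) + ((sum >> 16) & 0xffff) + (sum >> 32);
	/* Fold the remaining carries back into the low 16 bits. */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;		/* ones-complement result */
}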
aixcc-public/challenge-001-exemplar-source
2,116
arch/alpha/lib/copy_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/copy_user.S * * Copy to/from user space, handling exceptions as we go.. This * isn't exactly pretty. * * This is essentially the same as "memcpy()", but with a few twists. * Notably, we have to make sure that $0 is always up-to-date and * contains the right "bytes left to copy" value (and that it is updated * only _after_ a successful copy). There is also some rather minor * exception setup stuff.. */ #include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EXI(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exitin-99b($31); \ .previous #define EXO(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exitout-99b($31); \ .previous .set noat .align 4 .globl __copy_user .ent __copy_user __copy_user: .prologue 0 mov $18,$0 and $16,7,$3 beq $0,$35 beq $3,$36 subq $3,8,$3 .align 4 $37: EXI( ldq_u $1,0($17) ) EXO( ldq_u $2,0($16) ) extbl $1,$17,$1 mskbl $2,$16,$2 insbl $1,$16,$1 addq $3,1,$3 bis $1,$2,$1 EXO( stq_u $1,0($16) ) subq $0,1,$0 addq $16,1,$16 addq $17,1,$17 beq $0,$41 bne $3,$37 $36: and $17,7,$1 bic $0,7,$4 beq $1,$43 beq $4,$48 EXI( ldq_u $3,0($17) ) .align 4 $50: EXI( ldq_u $2,8($17) ) subq $4,8,$4 extql $3,$17,$3 extqh $2,$17,$1 bis $3,$1,$1 EXO( stq $1,0($16) ) addq $17,8,$17 subq $0,8,$0 addq $16,8,$16 bis $2,$2,$3 bne $4,$50 $48: beq $0,$41 .align 4 $57: EXI( ldq_u $1,0($17) ) EXO( ldq_u $2,0($16) ) extbl $1,$17,$1 mskbl $2,$16,$2 insbl $1,$16,$1 bis $1,$2,$1 EXO( stq_u $1,0($16) ) subq $0,1,$0 addq $16,1,$16 addq $17,1,$17 bne $0,$57 br $31,$41 .align 4 $43: beq $4,$65 .align 4 $66: EXI( ldq $1,0($17) ) subq $4,8,$4 EXO( stq $1,0($16) ) addq $17,8,$17 subq $0,8,$0 addq $16,8,$16 bne $4,$66 $65: beq $0,$41 EXI( ldq $2,0($17) ) EXO( ldq $1,0($16) ) mskql $2,$0,$2 mskqh $1,$0,$1 bis $2,$1,$2 EXO( stq $2,0($16) ) bis $31,$31,$0 $41: $35: $exitin: $exitout: ret $31,($26),1 .end __copy_user EXPORT_SYMBOL(__copy_user)
aixcc-public/challenge-001-exemplar-source
6,508
arch/alpha/lib/ev6-divide.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-divide.S * * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Alpha division.. */ /* * The alpha chip doesn't provide hardware division, so we have to do it * by hand. The compiler expects the functions * * __divqu: 64-bit unsigned long divide * __remqu: 64-bit unsigned long remainder * __divqs/__remqs: signed 64-bit * __divlu/__remlu: unsigned 32-bit * __divls/__remls: signed 32-bit * * These are not normal C functions: instead of the normal * calling sequence, these expect their arguments in registers * $24 and $25, and return the result in $27. Register $28 may * be clobbered (assembly temporary), anything else must be saved. * * In short: painful. * * This is a rather simple bit-at-a-time algorithm: it's very good * at dividing random 64-bit numbers, but the more usual case where * the divisor is small is handled better by the DEC algorithm * using lookup tables. This uses much less memory, though, and is * nicer on the cache.. Besides, I don't know the copyright status * of the DEC code. */ /* * My temporaries: * $0 - current bit * $1 - shifted divisor * $2 - modulus/quotient * * $23 - return address * $24 - dividend * $25 - divisor * * $27 - quotient/modulus * $28 - compare status * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/export.h> #define halt .long 0 /* * Select function type and registers */ #define mask $0 #define divisor $1 #define compare $28 #define tmp1 $3 #define tmp2 $4 #ifdef DIV #define DIV_ONLY(x,y...) x,##y #define MOD_ONLY(x,y...) #define func(x) __div##x #define modulus $2 #define quotient $27 #define GETSIGN(x) xor $24,$25,x #define STACK 48 #else #define DIV_ONLY(x,y...) #define MOD_ONLY(x,y...) x,##y #define func(x) __rem##x #define modulus $27 #define quotient $2 #define GETSIGN(x) bis $24,$24,x #define STACK 32 #endif /* * For 32-bit operations, we need to extend to 64-bit */ #ifdef INTSIZE #define ufunction func(lu) #define sfunction func(l) #define LONGIFY(x) zapnot x,15,x #define SLONGIFY(x) addl x,0,x #else #define ufunction func(qu) #define sfunction func(q) #define LONGIFY(x) #define SLONGIFY(x) #endif .set noat .align 4 .globl ufunction .ent ufunction ufunction: subq $30,STACK,$30 # E : .frame $30,STACK,$23 .prologue 0 7: stq $1, 0($30) # L : bis $25,$25,divisor # E : stq $2, 8($30) # L : L U L U bis $24,$24,modulus # E : stq $0,16($30) # L : bis $31,$31,quotient # E : LONGIFY(divisor) # E : U L L U stq tmp1,24($30) # L : LONGIFY(modulus) # E : bis $31,1,mask # E : DIV_ONLY(stq tmp2,32($30)) # L : L U U L beq divisor, 9f /* div by zero */ /* * In spite of the DIV_ONLY being either a non-instruction * or an actual stq, the addition of the .align directive * below ensures that label 1 is going to be nicely aligned */ .align 4 #ifdef INTSIZE /* * shift divisor left, using 3-bit shifts for * 32-bit divides as we can't overflow. Three-bit * shifts will result in looping three times less * here, but can result in two loops more later. * Thus using a large shift isn't worth it (and * s8add pairs better than a sll..) 
*/ 1: cmpult divisor,modulus,compare # E : s8addq divisor,$31,divisor # E : s8addq mask,$31,mask # E : bne compare,1b # U : U L U L #else 1: cmpult divisor,modulus,compare # E : nop # E : nop # E : blt divisor, 2f # U : U L U L addq divisor,divisor,divisor # E : addq mask,mask,mask # E : unop # E : bne compare,1b # U : U L U L #endif /* ok, start to go right again.. */ 2: /* * Keep things nicely bundled... use a nop instead of not * having an instruction for DIV_ONLY */ #ifdef DIV DIV_ONLY(addq quotient,mask,tmp2) # E : #else nop # E : #endif srl mask,1,mask # U : cmpule divisor,modulus,compare # E : subq modulus,divisor,tmp1 # E : #ifdef DIV DIV_ONLY(cmovne compare,tmp2,quotient) # E : Latency 2, extra map slot nop # E : as part of the cmovne srl divisor,1,divisor # U : nop # E : L U L U nop # E : cmovne compare,tmp1,modulus # E : Latency 2, extra map slot nop # E : as part of the cmovne bne mask,2b # U : U L U L #else srl divisor,1,divisor # U : cmovne compare,tmp1,modulus # E : Latency 2, extra map slot nop # E : as part of the cmovne bne mask,2b # U : U L L U #endif 9: ldq $1, 0($30) # L : ldq $2, 8($30) # L : nop # E : nop # E : U U L L ldq $0,16($30) # L : ldq tmp1,24($30) # L : nop # E : nop # E : #ifdef DIV DIV_ONLY(ldq tmp2,32($30)) # L : #else nop # E : #endif addq $30,STACK,$30 # E : ret $31,($23),1 # L0 : L U U L .end ufunction EXPORT_SYMBOL(ufunction) /* * Uhh.. Ugly signed division. I'd rather not have it at all, but * it's needed in some circumstances. There are different ways to * handle this, really. This does: * -a / b = a / -b = -(a / b) * -a % b = -(a % b) * a % -b = a % b * which is probably not the best solution, but at least should * have the property that (x/y)*y + (x%y) = x. */ .align 4 .globl sfunction .ent sfunction sfunction: subq $30,STACK,$30 # E : .frame $30,STACK,$23 .prologue 0 bis $24,$25,$28 # E : SLONGIFY($28) # E : bge $28,7b # U : stq $24,0($30) # L : subq $31,$24,$28 # E : stq $25,8($30) # L : nop # E : U L U L cmovlt $24,$28,$24 /* abs($24) */ # E : Latency 2, extra map slot nop # E : as part of the cmov stq $23,16($30) # L : subq $31,$25,$28 # E : U L U L stq tmp1,24($30) # L : cmovlt $25,$28,$25 /* abs($25) */ # E : Latency 2, extra map slot nop # E : bsr $23,ufunction # L0: L U L U ldq $24,0($30) # L : ldq $25,8($30) # L : GETSIGN($28) # E : subq $31,$27,tmp1 # E : U U L L SLONGIFY($28) # E : ldq $23,16($30) # L : cmovlt $28,tmp1,$27 # E : Latency 2, extra map slot nop # E : U L L U : as part of the cmov ldq tmp1,24($30) # L : nop # E : as part of the cmov addq $30,STACK,$30 # E : ret $31,($23),1 # L0 : L U U L .end sfunction EXPORT_SYMBOL(sfunction)
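The algorithm the header calls "a rather simple bit-at-a-time algorithm" is ordinary shift-and-subtract (restoring) division: scale the divisor up alongside a one-bit mask, then walk back down emitting one quotient bit per step. A plain C rendering of the unsigned 64-bit case follows; divmod_u64 is an illustrative name, and the real entry points use the special $24/$25/$27 register convention and wrap the 32-bit and signed variants around the same core.

#include <stdint.h>

static uint64_t divmod_u64(uint64_t dividend, uint64_t divisor, uint64_t *rem)
{
	uint64_t mask = 1;
	uint64_t quotient = 0;

	if (divisor == 0) {		/* the assembly bails out here too */
		if (rem)
			*rem = dividend;
		return 0;
	}

	/* Scale the divisor up, stopping before its top bit would be lost. */
	while (divisor < dividend && (int64_t)divisor >= 0) {
		divisor <<= 1;
		mask <<= 1;
	}

	/* Walk back down, subtracting wherever the divisor still fits. */
	while (mask) {
		if (divisor <= dividend) {
			dividend -= divisor;
			quotient |= mask;
		}
		divisor >>= 1;
		mask >>= 1;
	}

	if (rem)
		*rem = dividend;	/* what is left is the remainder */
	return quotient;
}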
aixcc-public/challenge-001-exemplar-source
2,980
arch/alpha/lib/csum_ipv6_magic.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/csum_ipv6_magic.S * Contributed by Richard Henderson <rth@tamu.edu> * * unsigned short csum_ipv6_magic(struct in6_addr *saddr, * struct in6_addr *daddr, * __u32 len, * unsigned short proto, * unsigned int csum); * * Misalignment handling (which costs 16 instructions / 8 cycles) * added by Ivan Kokshaysky <ink@jurassic.park.msu.ru> */ #include <asm/export.h> .globl csum_ipv6_magic .align 4 .ent csum_ipv6_magic .frame $30,0,$26,0 csum_ipv6_magic: .prologue 0 ldq_u $0,0($16) # e0 : load src & dst addr words zapnot $20,15,$20 # .. e1 : zero extend incoming csum extqh $18,1,$4 # e0 : byte swap len & proto while we wait ldq_u $21,7($16) # .. e1 : handle misalignment extbl $18,1,$5 # e0 : ldq_u $1,8($16) # .. e1 : extbl $18,2,$6 # e0 : ldq_u $22,15($16) # .. e1 : extbl $18,3,$18 # e0 : ldq_u $2,0($17) # .. e1 : sra $4,32,$4 # e0 : ldq_u $23,7($17) # .. e1 : extql $0,$16,$0 # e0 : ldq_u $3,8($17) # .. e1 : extqh $21,$16,$21 # e0 : ldq_u $24,15($17) # .. e1 : sll $5,16,$5 # e0 : or $0,$21,$0 # .. e1 : 1st src word complete extql $1,$16,$1 # e0 : addq $20,$0,$20 # .. e1 : begin summing the words extqh $22,$16,$22 # e0 : cmpult $20,$0,$0 # .. e1 : sll $6,8,$6 # e0 : or $1,$22,$1 # .. e1 : 2nd src word complete extql $2,$17,$2 # e0 : or $4,$18,$18 # .. e1 : extqh $23,$17,$23 # e0 : or $5,$6,$5 # .. e1 : extql $3,$17,$3 # e0 : or $2,$23,$2 # .. e1 : 1st dst word complete extqh $24,$17,$24 # e0 : or $18,$5,$18 # .. e1 : len complete extwh $19,7,$7 # e0 : or $3,$24,$3 # .. e1 : 2nd dst word complete extbl $19,1,$19 # e0 : addq $20,$1,$20 # .. e1 : or $19,$7,$19 # e0 : cmpult $20,$1,$1 # .. e1 : sll $19,48,$19 # e0 : nop # .. e0 : sra $19,32,$19 # e0 : proto complete addq $20,$2,$20 # .. e1 : cmpult $20,$2,$2 # e0 : addq $20,$3,$20 # .. e1 : cmpult $20,$3,$3 # e0 : addq $20,$18,$20 # .. e1 : cmpult $20,$18,$18 # e0 : addq $20,$19,$20 # .. e1 : cmpult $20,$19,$19 # e0 : addq $0,$1,$0 # .. e1 : merge the carries back into the csum addq $2,$3,$2 # e0 : addq $18,$19,$18 # .. e1 : addq $0,$2,$0 # e0 : addq $20,$18,$20 # .. e1 : addq $0,$20,$0 # e0 : unop # : extwl $0,2,$2 # e0 : begin folding the 64-bit value zapnot $0,3,$3 # .. e1 : extwl $0,4,$1 # e0 : addq $2,$3,$3 # .. e1 : extwl $0,6,$0 # e0 : addq $3,$1,$3 # .. e1 : addq $0,$3,$0 # e0 : unop # : extwl $0,2,$1 # e0 : fold 18-bit value zapnot $0,3,$0 # .. e1 : addq $0,$1,$0 # e0 : unop # : extwl $0,2,$1 # e0 : fold 17-bit value zapnot $0,3,$0 # .. e1 : addq $0,$1,$0 # e0 : not $0,$0 # .. e1 : and complement. zapnot $0,3,$0 # e0 : ret # .. e1 : .end csum_ipv6_magic EXPORT_SYMBOL(csum_ipv6_magic)
aixcc-public/challenge-001-exemplar-source
2,854
arch/alpha/lib/strrchr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/strrchr.S * Contributed by Richard Henderson (rth@tamu.edu) * * Return the address of the last occurrence of a given character * within a null-terminated string, or null if it is not found. */ #include <asm/export.h> #include <asm/regdef.h> .set noreorder .set noat .align 3 .ent strrchr .globl strrchr strrchr: .frame sp, 0, ra .prologue 0 zapnot a1, 1, a1 # e0 : zero extend our test character mov zero, t6 # .. e1 : t6 is last match aligned addr sll a1, 8, t5 # e0 : replicate our test character mov zero, t8 # .. e1 : t8 is last match byte compare mask or t5, a1, a1 # e0 : ldq_u t0, 0(a0) # .. e1 : load first quadword sll a1, 16, t5 # e0 : andnot a0, 7, v0 # .. e1 : align source addr or t5, a1, a1 # e0 : lda t4, -1 # .. e1 : build garbage mask sll a1, 32, t5 # e0 : cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero mskqh t4, a0, t4 # e0 : or t5, a1, a1 # .. e1 : character replication complete xor t0, a1, t2 # e0 : make bytes == c zero cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage cmpbge zero, t2, t3 # e0 : bits set iff byte == c andnot t1, t4, t1 # .. e1 : clear garbage from null test andnot t3, t4, t3 # e0 : clear garbage from char test bne t1, $eos # .. e1 : did we already hit the terminator? /* Character search main loop */ $loop: ldq t0, 8(v0) # e0 : load next quadword cmovne t3, v0, t6 # .. e1 : save previous comparisons match cmovne t3, t3, t8 # e0 : addq v0, 8, v0 # .. e1 : xor t0, a1, t2 # e0 : cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero cmpbge zero, t2, t3 # e0 : bits set iff byte == c beq t1, $loop # .. e1 : if we havnt seen a null, loop /* Mask out character matches after terminator */ $eos: negq t1, t4 # e0 : isolate first null byte match and t1, t4, t4 # e1 : subq t4, 1, t5 # e0 : build a mask of the bytes up to... or t4, t5, t4 # e1 : ... and including the null and t3, t4, t3 # e0 : mask out char matches after null cmovne t3, t3, t8 # .. e1 : save it, if match found cmovne t3, v0, t6 # e0 : /* Locate the address of the last matched character */ /* Retain the early exit for the ev4 -- the ev5 mispredict penalty is 5 cycles -- the same as just falling through. */ beq t8, $retnull # .. e1 : and t8, 0xf0, t2 # e0 : binary search for the high bit set cmovne t2, t2, t8 # .. e1 (zdb) cmovne t2, 4, t2 # e0 : and t8, 0xcc, t1 # .. e1 : cmovne t1, t1, t8 # e0 : cmovne t1, 2, t1 # .. e1 : and t8, 0xaa, t0 # e0 : cmovne t0, 1, t0 # .. e1 (zdb) addq t2, t1, t1 # e0 : addq t6, t0, v0 # .. e1 : add our aligned base ptr to the mix addq v0, t1, v0 # e0 : ret # .. e1 : $retnull: mov zero, v0 # e0 : ret # .. e1 : .end strrchr EXPORT_SYMBOL(strrchr)
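The $eos block above contains the one non-obvious step: once the word holding the terminator is reached, character matches lying beyond the NUL must be discarded before picking the last one. With the 8-bit cmpbge masks that is a three-instruction bit trick; here is a C transcription, where matches_before_nul is an illustrative name and zero_mask is assumed nonzero, since this runs only after a terminator was seen.

static unsigned matches_before_nul(unsigned zero_mask, unsigned match_mask)
{
	unsigned first_nul = zero_mask & -zero_mask;	  /* lowest set bit */
	unsigned upto_nul = first_nul | (first_nul - 1);  /* that byte and all
							     bytes below it */
	return match_mask & upto_nul;	/* drop matches past the terminator */
}

The surviving mask is then reduced to a byte offset by the highest-set-bit search (the 0xf0/0xcc/0xaa steps) further down.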
aixcc-public/challenge-001-exemplar-source
3,342
arch/alpha/lib/udiv-qrnnd.S
# Alpha 21064 __udiv_qrnnd # Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc. # This file is part of GCC. # The GNU MP Library is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or (at your # option) any later version. # In addition to the permissions in the GNU General Public License, the # Free Software Foundation gives you unlimited permission to link the # compiled version of this file with other programs, and to distribute # those programs without any restriction coming from the use of this # file. (The General Public License restrictions do apply in other # respects; for example, they cover modification of the file, and # distribution when not linked into another program.) # This file is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public # License for more details. # You should have received a copy of the GNU General Public License # along with GCC; see the file COPYING. If not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, # MA 02111-1307, USA. #include <asm/export.h> .set noreorder .set noat .text .globl __udiv_qrnnd .ent __udiv_qrnnd __udiv_qrnnd: .frame $30,0,$26,0 .prologue 0 #define cnt $2 #define tmp $3 #define rem_ptr $16 #define n1 $17 #define n0 $18 #define d $19 #define qb $20 #define AT $at ldiq cnt,16 blt d,$largedivisor $loop1: cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 subq cnt,1,cnt bgt cnt,$loop1 stq n1,0(rem_ptr) bis $31,n0,$0 ret $31,($26),1 $largedivisor: and n0,1,$4 srl n0,1,n0 sll n1,63,tmp or tmp,n0,n0 srl n1,1,n1 and d,1,$6 srl d,1,$5 addq $5,$6,$5 $loop2: cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 subq cnt,1,cnt bgt cnt,$loop2 addq n1,n1,n1 addq $4,n1,n1 bne $6,$Odd stq n1,0(rem_ptr) bis $31,n0,$0 ret $31,($26),1 $Odd: /* q' in n0. r' in n1 */ addq n1,n0,n1 cmpult n1,n0,tmp # tmp := carry from addq subq n1,d,AT addq n0,tmp,n0 cmovne tmp,AT,n1 cmpult n1,d,tmp addq n0,1,AT cmoveq tmp,AT,n0 subq n1,d,AT cmoveq tmp,AT,n1 stq n1,0(rem_ptr) bis $31,n0,$0 ret $31,($26),1 .end __udiv_qrnnd EXPORT_SYMBOL(__udiv_qrnnd)
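__udiv_qrnnd divides the double-word value n1:n0 by d, returning the quotient and storing the remainder through rem_ptr; the loop handles one bit per pass, unrolled four at a time over 16 passes. Below is a C reference for the common path; udiv_qrnnd_ref is an illustrative name, and like the fast path above it assumes n1 < d and a divisor without its top bit set, the case the $largedivisor code exists to handle.

#include <stdint.h>

static uint64_t udiv_qrnnd_ref(uint64_t *rem, uint64_t n1, uint64_t n0,
			       uint64_t d)
{
	uint64_t q = 0;
	int i;

	for (i = 0; i < 64; i++) {
		/* Shift the 128-bit value n1:n0 left by one bit. */
		uint64_t carry = n0 >> 63;

		n1 = (n1 << 1) | carry;
		n0 <<= 1;
		q <<= 1;
		if (n1 >= d) {		/* divisor fits: emit a quotient bit */
			n1 -= d;
			q |= 1;
		}
	}
	*rem = n1;
	return q;
}

The assembly saves a register by shifting the quotient bits into n0 itself as it empties; a separate q is used here only for clarity.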
aixcc-public/challenge-001-exemplar-source
3,379
arch/alpha/lib/ev67-strrchr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev67-strrchr.S * 21264 version by Rick Gorton <rick.gorton@alpha-processor.com> * * Finds length of a 0-terminated string. Optimized for the * Alpha architecture: * * - memory accessed as aligned quadwords only * - uses bcmpge to compare 8 bytes in parallel * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ #include <asm/export.h> #include <asm/regdef.h> .set noreorder .set noat .align 4 .ent strrchr .globl strrchr strrchr: .frame sp, 0, ra .prologue 0 and a1, 0xff, t2 # E : 00000000000000ch insbl a1, 1, t4 # U : 000000000000ch00 insbl a1, 2, t5 # U : 0000000000ch0000 ldq_u t0, 0(a0) # L : load first quadword Latency=3 mov zero, t6 # E : t6 is last match aligned addr or t2, t4, a1 # E : 000000000000chch sll t5, 8, t3 # U : 00000000ch000000 mov zero, t8 # E : t8 is last match byte compare mask andnot a0, 7, v0 # E : align source addr or t5, t3, t3 # E : 00000000chch0000 sll a1, 32, t2 # U : 0000chch00000000 sll a1, 48, t4 # U : chch000000000000 or t4, a1, a1 # E : chch00000000chch or t2, t3, t2 # E : 0000chchchch0000 or a1, t2, a1 # E : chchchchchchchch lda t5, -1 # E : build garbage mask cmpbge zero, t0, t1 # E : bits set iff byte == zero mskqh t5, a0, t4 # E : Complete garbage mask xor t0, a1, t2 # E : make bytes == c zero cmpbge zero, t4, t4 # E : bits set iff byte is garbage cmpbge zero, t2, t3 # E : bits set iff byte == c andnot t1, t4, t1 # E : clear garbage from null test andnot t3, t4, t3 # E : clear garbage from char test bne t1, $eos # U : did we already hit the terminator? /* Character search main loop */ $loop: ldq t0, 8(v0) # L : load next quadword cmovne t3, v0, t6 # E : save previous comparisons match nop # : Latency=2, extra map slot (keep nop with cmov) nop cmovne t3, t3, t8 # E : Latency=2, extra map slot nop # : keep with cmovne addq v0, 8, v0 # E : xor t0, a1, t2 # E : cmpbge zero, t0, t1 # E : bits set iff byte == zero cmpbge zero, t2, t3 # E : bits set iff byte == c beq t1, $loop # U : if we havnt seen a null, loop nop /* Mask out character matches after terminator */ $eos: negq t1, t4 # E : isolate first null byte match and t1, t4, t4 # E : subq t4, 1, t5 # E : build a mask of the bytes up to... or t4, t5, t4 # E : ... and including the null and t3, t4, t3 # E : mask out char matches after null cmovne t3, t3, t8 # E : save it, if match found Latency=2, extra map slot nop # : Keep with cmovne nop cmovne t3, v0, t6 # E : nop # : Keep with cmovne /* Locate the address of the last matched character */ ctlz t8, t2 # U0 : Latency=3 (0x40 for t8=0) nop cmoveq t8, 0x3f, t2 # E : Compensate for case when no match is seen nop # E : hide the cmov latency (2) behind ctlz latency lda t5, 0x3f($31) # E : subq t5, t2, t5 # E : Normalize leading zero count addq t6, t5, v0 # E : and add to quadword address ret # L0 : Latency=3 nop nop .end strrchr EXPORT_SYMBOL(strrchr)
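Where the generic strrchr does a three-step binary search for the highest set bit of the final match mask, this EV67 version gets the byte index in one ctlz. The equivalent in C using the corresponding builtin; last_match_index is an illustrative name, and the mask must be nonzero, which the cmoveq on t8 guards against in the assembly.

#include <stdint.h>

static unsigned last_match_index(uint64_t match_mask)
{
	/* Highest set bit of the 8-bit cmpbge mask = byte offset of the
	 * last occurrence within the quadword. */
	return 63u - (unsigned)__builtin_clzll(match_mask);
}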
aixcc-public/challenge-001-exemplar-source
5,047
arch/alpha/lib/memchr.S
/* Copyright (C) 1996 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by David Mosberger (davidm@cs.arizona.edu). The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with the GNU C Library; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Finds characters in a memory area. Optimized for the Alpha: - memory accessed as aligned quadwords only - uses cmpbge to compare 8 bytes in parallel - does binary search to find 0 byte in last quadword (HAKMEM needed 12 instructions to do this instead of the 9 instructions that binary search needs). For correctness consider that: - only minimum number of quadwords may be accessed - the third argument is an unsigned long */ #include <asm/export.h> .set noreorder .set noat .globl memchr .ent memchr memchr: .frame $30,0,$26,0 .prologue 0 # Hack -- if someone passes in (size_t)-1, hoping to just # search til the end of the address space, we will overflow # below when we find the address of the last byte. Given # that we will never have a 56-bit address space, cropping # the length is the easiest way to avoid trouble. zap $18, 0x80, $5 #-e0 : beq $18, $not_found # .. e1 : ldq_u $1, 0($16) # e1 : load first quadword insbl $17, 1, $2 # .. e0 : $2 = 000000000000ch00 and $17, 0xff, $17 #-e0 : $17 = 00000000000000ch cmpult $18, 9, $4 # .. e1 : or $2, $17, $17 # e0 : $17 = 000000000000chch lda $3, -1($31) # .. e1 : sll $17, 16, $2 #-e0 : $2 = 00000000chch0000 addq $16, $5, $5 # .. e1 : or $2, $17, $17 # e1 : $17 = 00000000chchchch unop # : sll $17, 32, $2 #-e0 : $2 = chchchch00000000 or $2, $17, $17 # e1 : $17 = chchchchchchchch extql $1, $16, $7 # e0 : beq $4, $first_quad # .. e1 : ldq_u $6, -1($5) #-e1 : eight or less bytes to search extqh $6, $16, $6 # .. e0 : mov $16, $0 # e0 : or $7, $6, $1 # .. e1 : $1 = quadword starting at $16 # Deal with the case where at most 8 bytes remain to be searched # in $1. E.g.: # $18 = 6 # $1 = ????c6c5c4c3c2c1 $last_quad: negq $18, $6 #-e0 : xor $17, $1, $1 # .. e1 : srl $3, $6, $6 # e0 : $6 = mask of $18 bits set cmpbge $31, $1, $2 # .. e1 : and $2, $6, $2 #-e0 : beq $2, $not_found # .. e1 : $found_it: # Now, determine which byte matched: negq $2, $3 # e0 : and $2, $3, $2 # e1 : and $2, 0x0f, $1 #-e0 : addq $0, 4, $3 # .. e1 : cmoveq $1, $3, $0 # e0 : addq $0, 2, $3 # .. e1 : and $2, 0x33, $1 #-e0 : cmoveq $1, $3, $0 # .. e1 : and $2, 0x55, $1 # e0 : addq $0, 1, $3 # .. e1 : cmoveq $1, $3, $0 #-e0 : $done: ret # .. e1 : # Deal with the case where $18 > 8 bytes remain to be # searched. $16 may not be aligned. .align 4 $first_quad: andnot $16, 0x7, $0 #-e1 : insqh $3, $16, $2 # .. e0 : $2 = 0000ffffffffffff ($16<0:2> ff) xor $1, $17, $1 # e0 : or $1, $2, $1 # e1 : $1 = ====ffffffffffff cmpbge $31, $1, $2 #-e0 : bne $2, $found_it # .. e1 : # At least one byte left to process. ldq $1, 8($0) # e0 : subq $5, 1, $18 # .. 
e1 : addq $0, 8, $0 #-e0 : # Make $18 point to last quad to be accessed (the # last quad may or may not be partial). andnot $18, 0x7, $18 # .. e1 : cmpult $0, $18, $2 # e0 : beq $2, $final # .. e1 : # At least two quads remain to be accessed. subq $18, $0, $4 #-e0 : $4 <- nr quads to be processed and $4, 8, $4 # e1 : odd number of quads? bne $4, $odd_quad_count # e1 : # At least three quads remain to be accessed mov $1, $4 # e0 : move prefetched value to correct reg .align 4 $unrolled_loop: ldq $1, 8($0) #-e0 : prefetch $1 xor $17, $4, $2 # .. e1 : cmpbge $31, $2, $2 # e0 : bne $2, $found_it # .. e1 : addq $0, 8, $0 #-e0 : $odd_quad_count: xor $17, $1, $2 # .. e1 : ldq $4, 8($0) # e0 : prefetch $4 cmpbge $31, $2, $2 # .. e1 : addq $0, 8, $6 #-e0 : bne $2, $found_it # .. e1 : cmpult $6, $18, $6 # e0 : addq $0, 8, $0 # .. e1 : bne $6, $unrolled_loop #-e1 : mov $4, $1 # e0 : move prefetched value into $1 $final: subq $5, $0, $18 # .. e1 : $18 <- number of bytes left to do bne $18, $last_quad # e1 : $not_found: mov $31, $0 #-e0 : ret # .. e1 : .end memchr EXPORT_SYMBOL(memchr)
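The header comment in memchr.S above centres on two ideas: scan aligned quadwords only, and use cmpbge to test 8 bytes at once. Below is a rough, stand-alone C sketch of the same word-at-a-time scan; it substitutes the portable "has a zero byte" bit trick for cmpbge, and the names and the byte-wise head/tail loops are illustrative rather than taken from the kernel. (The binary-search step the comment contrasts with HAKMEM is sketched separately after strlen.S further down.)

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Nonzero iff some byte of x is zero: the classic
 * (x - 0x01..01) & ~x & 0x80..80 trick, standing in for cmpbge. */
static int has_zero_byte(uint64_t x)
{
        return ((x - 0x0101010101010101ULL) & ~x & 0x8080808080808080ULL) != 0;
}

void *memchr_sketch(const void *s, int c, size_t n)
{
        const unsigned char *p = s;
        uint64_t pattern = (unsigned char)c;

        /* replicate the search byte into all eight byte lanes */
        pattern |= pattern << 8;
        pattern |= pattern << 16;
        pattern |= pattern << 32;

        /* head: advance byte-wise until 8-byte aligned */
        while (n && ((uintptr_t)p & 7)) {
                if (*p == (unsigned char)c)
                        return (void *)p;
                p++, n--;
        }
        /* body: scan whole quadwords */
        while (n >= 8) {
                uint64_t w;
                memcpy(&w, p, 8);
                if (has_zero_byte(w ^ pattern))
                        break;          /* a matching byte is in this word */
                p += 8, n -= 8;
        }
        /* tail (and the word that matched): finish byte-wise */
        while (n) {
                if (*p == (unsigned char)c)
                        return (void *)p;
                p++, n--;
        }
        return NULL;
}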
aixcc-public/challenge-001-exemplar-source
4,341
arch/alpha/lib/ev6-copy_page.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-copy_page.S * * Copy an entire page. */ /* The following comparison of this routine vs the normal copy_page.S was written by an unnamed ev6 hardware designer and forwarded to me via Steven Hobbs <hobbs@steven.zko.dec.com>. First Problem: STQ overflows. ----------------------------- It would be nice if EV6 handled every resource overflow efficiently, but for some it doesn't. Including store queue overflows. It causes a trap and a restart of the pipe. To get around this we sometimes use (to borrow a term from a VSSAD researcher) "aeration". The idea is to slow the rate at which the processor receives valid instructions by inserting nops in the fetch path. In doing so, you can prevent the overflow and actually make the code run faster. You can, of course, take advantage of the fact that the processor can fetch at most 4 aligned instructions per cycle. I inserted enough nops to force it to take 10 cycles to fetch the loop code. In theory, EV6 should be able to execute this loop in 9 cycles but I was not able to get it to run that fast -- the initial conditions were such that I could not reach this optimum rate on (chaotic) EV6. I wrote the code such that everything would issue in order. Second Problem: Dcache index matches. ------------------------------------- If you are going to use this routine on random aligned pages, there is a 25% chance that the pages will be at the same dcache indices. This results in many nasty memory traps without care. The solution is to schedule the prefetches to avoid the memory conflicts. I schedule the wh64 prefetches farther ahead of the read prefetches to avoid this problem. Third Problem: Needs more prefetching. -------------------------------------- In order to improve the code I added deeper prefetching to take the most advantage of EV6's bandwidth. I also prefetched the read stream. Note that adding the read prefetch forced me to add another cycle to the inner-most kernel - up to 11 from the original 8 cycles per iteration. We could improve performance further by unrolling the loop and doing multiple prefetches per cycle. I think that the code below will be very robust and fast code for the purposes of copying aligned pages. It is slower when both source and destination pages are in the dcache, but it is my guess that this is less important than the dcache miss case. */ #include <asm/export.h> .text .align 4 .global copy_page .ent copy_page copy_page: .prologue 0 /* Prefetch 5 read cachelines; write-hint 10 cache lines. */ wh64 ($16) ldl $31,0($17) ldl $31,64($17) lda $1,1*64($16) wh64 ($1) ldl $31,128($17) ldl $31,192($17) lda $1,2*64($16) wh64 ($1) ldl $31,256($17) lda $18,118 lda $1,3*64($16) wh64 ($1) nop lda $1,4*64($16) lda $2,5*64($16) wh64 ($1) wh64 ($2) lda $1,6*64($16) lda $2,7*64($16) wh64 ($1) wh64 ($2) lda $1,8*64($16) lda $2,9*64($16) wh64 ($1) wh64 ($2) lda $19,10*64($16) nop /* Main prefetching/write-hinting loop. */ 1: ldq $0,0($17) ldq $1,8($17) unop unop unop unop ldq $2,16($17) ldq $3,24($17) ldq $4,32($17) ldq $5,40($17) unop unop unop unop ldq $6,48($17) ldq $7,56($17) ldl $31,320($17) unop unop unop /* This gives the extra cycle of aeration above the minimum. */ unop unop unop unop wh64 ($19) unop unop unop stq $0,0($16) subq $18,1,$18 stq $1,8($16) unop unop stq $2,16($16) addq $17,64,$17 stq $3,24($16) stq $4,32($16) stq $5,40($16) addq $19,64,$19 unop stq $6,48($16) stq $7,56($16) addq $16,64,$16 bne $18, 1b /* Prefetch the final 5 cache lines of the read stream. 
*/ lda $18,10 ldl $31,320($17) ldl $31,384($17) ldl $31,448($17) ldl $31,512($17) ldl $31,576($17) nop nop /* Non-prefetching, non-write-hinting cleanup loop for the final 10 cache lines. */ 2: ldq $0,0($17) ldq $1,8($17) ldq $2,16($17) ldq $3,24($17) ldq $4,32($17) ldq $5,40($17) ldq $6,48($17) ldq $7,56($17) stq $0,0($16) subq $18,1,$18 stq $1,8($16) addq $17,64,$17 stq $2,16($16) stq $3,24($16) stq $4,32($16) stq $5,40($16) stq $6,48($16) stq $7,56($16) addq $16,64,$16 bne $18, 2b ret nop unop nop .end copy_page EXPORT_SYMBOL(copy_page)
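The long design note at the top of ev6-copy_page.S is about scheduling: prefetch the read stream ahead of the loads and write-hint (wh64) the destination ahead of the stores. A hedged C sketch of that overall shape follows, using the compiler's generic prefetch builtin; the 8 KB page size, the prefetch distances, and treating __builtin_prefetch(..., 1) as a stand-in for wh64 are all assumptions made for illustration, not a description of EV6 behaviour.

#include <string.h>

#define PAGE_SIZE_SKETCH 8192   /* Alpha pages are 8 KB; illustrative constant */

/* Copy a page 64 bytes at a time while keeping the read stream and the
 * (write-hinted) destination stream a few cache lines ahead of the copy. */
void copy_page_sketch(void *dst, const void *src)
{
        const char *s = src;
        char *d = dst;
        long n;

        for (n = 0; n < PAGE_SIZE_SKETCH; n += 64) {
                if (n + 320 < PAGE_SIZE_SKETCH)
                        __builtin_prefetch(s + n + 320, 0);     /* read stream */
                if (n + 640 < PAGE_SIZE_SKETCH)
                        __builtin_prefetch(d + n + 640, 1);     /* write stream */
                memcpy(d + n, s + n, 64);
        }
}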
aixcc-public/challenge-001-exemplar-source
7,189
arch/alpha/lib/ev6-clear_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-clear_user.S * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Zero user space, handling exceptions as we go. * * We have to make sure that $0 is always up-to-date and contains the * right "bytes left to zero" value (and that it is updated only _after_ * a successful copy). There is also some rather minor exception setup * stuff. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. * Determining actual stalls (other than slotting) doesn't appear to be easy to do. * From perusing the source code context where this routine is called, it is * a fair assumption that significant fractions of entire pages are zeroed, so * it's going to be worth the effort to hand-unroll a big loop, and use wh64. * ASSUMPTION: * The believed purpose of only updating $0 after a store is that a signal * may come along during the execution of this chunk of code, and we don't * want to leave a hole (and we also want to avoid repeating lots of work) */ #include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EX(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exception-99b($31); \ .previous .set noat .set noreorder .align 4 .globl __clear_user .ent __clear_user .frame $30, 0, $26 .prologue 0 # Pipeline info : Slotting & Comments __clear_user: and $17, $17, $0 and $16, 7, $4 # .. E .. .. : find dest head misalignment beq $0, $zerolength # U .. .. .. : U L U L addq $0, $4, $1 # .. .. .. E : bias counter and $1, 7, $2 # .. .. E .. : number of misaligned bytes in tail # Note - we never actually use $2, so this is a moot computation # and we can rewrite this later... srl $1, 3, $1 # .. E .. .. : number of quadwords to clear beq $4, $headalign # U .. .. .. : U L U L /* * Head is not aligned. Write (8 - $4) bytes to head of destination * This means $16 is known to be misaligned */ EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in beq $1, $onebyte # .. .. U .. : sub-word store? mskql $5, $16, $5 # .. U .. .. : take care of misaligned head addq $16, 8, $16 # E .. .. .. : L U U L EX( stq_u $5, -8($16) ) # .. .. .. L : subq $1, 1, $1 # .. .. E .. : addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment subq $0, 8, $0 # E .. .. .. : U L U L .align 4 /* * (The .align directive ought to be a moot point) * values upon initial entry to the loop * $1 is number of quadwords to clear (zero is a valid value) * $2 is number of trailing bytes (0..7) ($2 never used...) * $16 is known to be aligned 0mod8 */ $headalign: subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop) blt $4, $trailquad # U .. .. .. : U L U L /* * We know that we're going to do at least 16 quads, which means we are * going to be able to use the large block clear loop at least once. * Figure out how many quads we need to clear before we are 0mod64 aligned * so we can use the wh64 instruction. */ nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. 
beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64 $alignmod64: EX( stq_u $31, 0($16) ) # .. .. .. L addq $3, 8, $3 # .. .. E .. subq $0, 8, $0 # .. E .. .. nop # E .. .. .. : U L U L nop # .. .. .. E subq $1, 1, $1 # .. .. E .. addq $16, 8, $16 # .. E .. .. blt $3, $alignmod64 # U .. .. .. : U L U L $bigalign: /* * $0 is the number of bytes left * $1 is the number of quads left * $16 is aligned 0mod64 * we know that we'll be taking a minimum of one trip through * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * We are _not_ going to update $0 after every single store. That * would be silly, because there will be cross-cluster dependencies * no matter how the code is scheduled. By doing it in slightly * staggered fashion, we can still do this loop in 5 fetches * The worse case will be doing two extra quads in some future execution, * in the event of an interrupted clear. * Assumes the wh64 needs to be for 2 trips through the loop in the future * The wh64 is issued on for the starting destination address for trip +2 * through the loop, and if there are less than two trips left, the target * address will be for the current trip. */ nop # E : nop # E : nop # E : bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest /* This might actually help for the current trip... */ $do_wh64: wh64 ($3) # .. .. .. L1 : memory subsystem hint subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop? EX( stq_u $31, 0($16) ) # .. L .. .. subq $0, 8, $0 # E .. .. .. : U L U L addq $16, 128, $3 # E : Target address of wh64 EX( stq_u $31, 8($16) ) # L : EX( stq_u $31, 16($16) ) # L : subq $0, 16, $0 # E : U L L U nop # E : EX( stq_u $31, 24($16) ) # L : EX( stq_u $31, 32($16) ) # L : subq $0, 168, $5 # E : U L L U : two trips through the loop left? /* 168 = 192 - 24, since we've already completed some stores */ subq $0, 16, $0 # E : EX( stq_u $31, 40($16) ) # L : EX( stq_u $31, 48($16) ) # L : cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle subq $1, 8, $1 # E : subq $0, 16, $0 # E : EX( stq_u $31, 56($16) ) # L : nop # E : U L U L nop # E : subq $0, 8, $0 # E : addq $16, 64, $16 # E : bge $4, $do_wh64 # U : U L U L $trailquad: # zero to 16 quadwords left to store, plus any trailing bytes # $1 is the number of quadwords left to go. # nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go $onequad: EX( stq_u $31, 0($16) ) # .. .. .. L subq $1, 1, $1 # .. .. E .. subq $0, 8, $0 # .. E .. .. nop # E .. .. .. : U L U L nop # .. .. .. E nop # .. .. E .. addq $16, 8, $16 # .. E .. .. bgt $1, $onequad # U .. .. .. : U L U L # We have an unknown number of bytes left to go. $trailbytes: nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. beq $0, $zerolength # U .. .. .. : U L U L # $0 contains the number of bytes left to copy (0..31) # so we will use $0 as the loop counter # We know for a fact that $0 > 0 zero due to previous context $onebyte: EX( stb $31, 0($16) ) # .. .. .. L subq $0, 1, $0 # .. .. E .. : addq $16, 1, $16 # .. E .. .. : bgt $0, $onebyte # U .. .. .. : U L U L $zerolength: $exception: # Destination for exception recovery(?) nop # .. .. .. E : nop # .. .. E .. : nop # .. E .. .. : ret $31, ($26), 1 # L0 .. .. .. : L U L U .end __clear_user EXPORT_SYMBOL(__clear_user)
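The header of ev6-clear_user.S stresses one invariant: $0 always holds the number of bytes still to be zeroed, updated only after a successful store, so a fault part-way through reports exactly how much work remains. A byte-wise C sketch of that contract is below; the fault_at parameter is an invented stand-in for the exception-table machinery, and the real routine of course works in quadwords with wh64 hints.

/* Zero n bytes at "to" and return the number of bytes NOT cleared
 * (0 on complete success).  fault_at models the first address the MMU
 * would refuse to write; pass NULL for the no-fault case. */
unsigned long clear_user_sketch(char *to, unsigned long n, const char *fault_at)
{
        while (n) {
                if (to == fault_at)
                        break;          /* fault: report the remainder */
                *to++ = 0;
                n--;                    /* count updated only after success */
        }
        return n;
}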
aixcc-public/challenge-001-exemplar-source
1,220
arch/alpha/lib/strlen.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * strlen.S (c) 1995 David Mosberger (davidm@cs.arizona.edu)
 *
 * Finds length of a 0-terminated string.  Optimized for the
 * Alpha architecture:
 *
 *	- memory accessed as aligned quadwords only
 *	- uses cmpbge to compare 8 bytes in parallel
 *	- does binary search to find 0 byte in last
 *	  quadword (HAKMEM needed 12 instructions to
 *	  do this instead of the 9 instructions that
 *	  binary search needs).
 */
#include <asm/export.h>
	.set noreorder
	.set noat

	.align 3

	.globl	strlen
	.ent	strlen

strlen:
	ldq_u	$1, 0($16)	# load first quadword ($16 may be misaligned)
	lda	$2, -1($31)
	insqh	$2, $16, $2
	andnot	$16, 7, $0
	or	$2, $1, $1
	cmpbge	$31, $1, $2	# $2 <- bitmask: bit i == 1 <==> i-th byte == 0
	bne	$2, found

loop:	ldq	$1, 8($0)
	addq	$0, 8, $0	# addr += 8
	nop			# helps dual issue last two insns
	cmpbge	$31, $1, $2
	beq	$2, loop

found:	blbs	$2, done	# make aligned case fast
	negq	$2, $3
	and	$2, $3, $2

	and	$2, 0x0f, $1
	addq	$0, 4, $3
	cmoveq	$1, $3, $0

	and	$2, 0x33, $1
	addq	$0, 2, $3
	cmoveq	$1, $3, $0

	and	$2, 0x55, $1
	addq	$0, 1, $3
	cmoveq	$1, $3, $0

done:	subq	$0, $16, $0
	ret	$31, ($26)

	.end	strlen
	EXPORT_SYMBOL(strlen)
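The comment above (and the one in memchr.S) mentions the 9-instruction binary search used to locate the zero byte once cmpbge has produced its per-byte mask. A small C sketch of that search, mirroring the 0x0f/0x33/0x55 cmoveq sequence at the found: label, is given below; the function name is illustrative.

/* Given a non-zero 8-bit mask where bit i is set iff byte i of the
 * quadword was 0, return the index of the lowest set bit. */
static unsigned int lowest_zero_byte_index(unsigned int mask)
{
        unsigned int idx = 0;

        mask &= -mask;          /* keep only the least significant set bit */
        if (!(mask & 0x0f))     /* not in bytes 0..3 */
                idx += 4;
        if (!(mask & 0x33))     /* not in the lower pair of its nibble */
                idx += 2;
        if (!(mask & 0x55))     /* not at an even position */
                idx += 1;
        return idx;
}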
aixcc-public/challenge-001-exemplar-source
2,502
arch/alpha/lib/ev67-strncat.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev67-strncat.S * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com> * * Append no more than COUNT characters from the null-terminated string SRC * to the null-terminated string DST. Always null-terminate the new DST. * * This differs slightly from the semantics in libc in that we never write * past count, whereas libc may write to count+1. This follows the generic * implementation in lib/string.c and is, IMHO, more sensible. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/export.h> .text .align 4 .globl strncat .ent strncat strncat: .frame $30, 0, $26 .prologue 0 mov $16, $0 # set up return value beq $18, $zerocount # U : /* Find the end of the string. */ ldq_u $1, 0($16) # L : load first quadword ($16 may be misaligned) lda $2, -1($31) # E : insqh $2, $0, $2 # U : andnot $16, 7, $16 # E : nop # E : or $2, $1, $1 # E : nop # E : nop # E : cmpbge $31, $1, $2 # E : bits set iff byte == 0 bne $2, $found # U : $loop: ldq $1, 8($16) # L : addq $16, 8, $16 # E : cmpbge $31, $1, $2 # E : beq $2, $loop # U : $found: cttz $2, $3 # U0 : addq $16, $3, $16 # E : nop # E : bsr $23, __stxncpy # L0 :/* Now do the append. */ /* Worry about the null termination. */ zapnot $1, $27, $2 # U : was last byte a null? cmplt $27, $24, $5 # E : did we fill the buffer completely? bne $2, 0f # U : ret # L0 : 0: or $5, $18, $2 # E : nop bne $2, 2f # U : and $24, 0x80, $3 # E : no zero next byte nop # E : bne $3, 1f # U : /* Here there are bytes left in the current word. Clear one. */ addq $24, $24, $24 # E : end-of-count bit <<= 1 nop # E : 2: zap $1, $24, $1 # U : nop # E : stq_u $1, 0($16) # L : ret # L0 : 1: /* Here we must clear the first byte of the next DST word */ stb $31, 8($16) # L : nop # E : nop # E : ret # L0 : $zerocount: nop # E : nop # E : nop # E : ret # L0 : .end strncat EXPORT_SYMBOL(strncat)
aixcc-public/challenge-001-exemplar-source
1,032
arch/alpha/lib/strcat.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/lib/strcat.S
 * Contributed by Richard Henderson (rth@tamu.edu)
 *
 * Append a null-terminated string from SRC to DST.
 */
#include <asm/export.h>

	.text

	.align 3
	.globl strcat
	.ent strcat
strcat:
	.frame $30, 0, $26
	.prologue 0

	mov	$16, $0		# set up return value

	/* Find the end of the string.  */

	ldq_u	$1, 0($16)	# load first quadword (a0 may be misaligned)
	lda	$2, -1
	insqh	$2, $16, $2
	andnot	$16, 7, $16
	or	$2, $1, $1
	cmpbge	$31, $1, $2	# bits set iff byte == 0
	bne	$2, $found

$loop:	ldq	$1, 8($16)
	addq	$16, 8, $16
	cmpbge	$31, $1, $2
	beq	$2, $loop

$found:	negq	$2, $3		# clear all but least set bit
	and	$2, $3, $2

	and	$2, 0xf0, $3	# binary search for that set bit
	and	$2, 0xcc, $4
	and	$2, 0xaa, $5
	cmovne	$3, 4, $3
	cmovne	$4, 2, $4
	cmovne	$5, 1, $5
	addq	$3, $4, $3
	addq	$16, $5, $16
	addq	$16, $3, $16

	/* Now do the append.  */

	mov	$26, $23
	br	__stxcpy

	.end strcat
	EXPORT_SYMBOL(strcat);
aixcc-public/challenge-001-exemplar-source
2,019
arch/alpha/lib/strchr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/strchr.S * Contributed by Richard Henderson (rth@tamu.edu) * * Return the address of a given character within a null-terminated * string, or null if it is not found. */ #include <asm/export.h> #include <asm/regdef.h> .set noreorder .set noat .align 3 .globl strchr .ent strchr strchr: .frame sp, 0, ra .prologue 0 zapnot a1, 1, a1 # e0 : zero extend the search character ldq_u t0, 0(a0) # .. e1 : load first quadword sll a1, 8, t5 # e0 : replicate the search character andnot a0, 7, v0 # .. e1 : align our loop pointer or t5, a1, a1 # e0 : lda t4, -1 # .. e1 : build garbage mask sll a1, 16, t5 # e0 : cmpbge zero, t0, t2 # .. e1 : bits set iff byte == zero mskqh t4, a0, t4 # e0 : or t5, a1, a1 # .. e1 : sll a1, 32, t5 # e0 : cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage or t5, a1, a1 # e0 : xor t0, a1, t1 # .. e1 : make bytes == c zero cmpbge zero, t1, t3 # e0 : bits set iff byte == c or t2, t3, t0 # e1 : bits set iff char match or zero match andnot t0, t4, t0 # e0 : clear garbage bits bne t0, $found # .. e1 (zdb) $loop: ldq t0, 8(v0) # e0 : addq v0, 8, v0 # .. e1 : nop # e0 : xor t0, a1, t1 # .. e1 (ev5 data stall) cmpbge zero, t0, t2 # e0 : bits set iff byte == 0 cmpbge zero, t1, t3 # .. e1 : bits set iff byte == c or t2, t3, t0 # e0 : beq t0, $loop # .. e1 (zdb) $found: negq t0, t1 # e0 : clear all but least set bit and t0, t1, t0 # e1 (stall) and t0, t3, t1 # e0 : bit set iff byte was the char beq t1, $retnull # .. e1 (zdb) and t0, 0xf0, t2 # e0 : binary search for that set bit and t0, 0xcc, t3 # .. e1 : and t0, 0xaa, t4 # e0 : cmovne t2, 4, t2 # .. e1 : cmovne t3, 2, t3 # e0 : cmovne t4, 1, t4 # .. e1 : addq t2, t3, t2 # e0 : addq v0, t4, v0 # .. e1 : addq v0, t2, v0 # e0 : ret # .. e1 : $retnull: mov zero, v0 # e0 : ret # .. e1 : .end strchr EXPORT_SYMBOL(strchr)
aixcc-public/challenge-001-exemplar-source
7,016
arch/alpha/lib/ev6-copy_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-copy_user.S * * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Copy to/from user space, handling exceptions as we go.. This * isn't exactly pretty. * * This is essentially the same as "memcpy()", but with a few twists. * Notably, we have to make sure that $0 is always up-to-date and * contains the right "bytes left to copy" value (and that it is updated * only _after_ a successful copy). There is also some rather minor * exception setup stuff.. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ #include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EXI(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exitin-99b($31); \ .previous #define EXO(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exitout-99b($31); \ .previous .set noat .align 4 .globl __copy_user .ent __copy_user # Pipeline info: Slotting & Comments __copy_user: .prologue 0 mov $18, $0 # .. .. .. E subq $18, 32, $1 # .. .. E. .. : Is this going to be a small copy? nop # .. E .. .. beq $18, $zerolength # U .. .. .. : U L U L and $16,7,$3 # .. .. .. E : is leading dest misalignment ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall) subq $3, 8, $3 # E .. .. .. : L U U L : trip counter /* * The fetcher stall also hides the 1 cycle cross-cluster stall for $3 (L --> U) * This loop aligns the destination a byte at a time * We know we have at least one trip through this loop */ $aligndest: EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG addq $3,1,$3 # .. E .. .. : nop # E .. .. .. : U L U L /* * the -1 is to compensate for the inc($16) done in a previous quadpack * which allows us zero dependencies within either quadpack in the loop */ EXO( stb $1,-1($16) ) # .. .. .. L : addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG subq $0,1,$0 # .. E .. .. : bne $3, $aligndest # U .. .. .. : U L U L /* * If we fell through into here, we have a minimum of 33 - 7 bytes * If we arrived via branch, we have a minimum of 32 bytes */ $destaligned: and $17,7,$1 # .. .. .. E : Check _current_ source alignment bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code beq $1,$quadaligned # U .. .. .. : U L U L /* * In the worst case, we've just executed an ldq_u here from 0($17) * and we'll repeat it once if we take the branch */ /* Misaligned quadword loop - not unrolled. Leave it that way. */ $misquad: EXI( ldq_u $2,8($17) ) # .. .. .. L : subq $4,8,$4 # .. .. E .. : extql $3,$17,$3 # .. U .. .. : extqh $2,$17,$1 # U .. .. .. : U U L L bis $3,$1,$1 # .. .. .. E : EXO( stq $1,0($16) ) # .. .. L .. : addq $17,8,$17 # .. E .. .. : subq $0,8,$0 # E .. .. .. : U L L U addq $16,8,$16 # .. .. .. E : bis $2,$2,$3 # .. .. E .. : nop # .. E .. .. : bne $4,$misquad # U .. .. .. : U L U L nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. beq $0,$zerolength # U .. .. .. 
: U L U L /* We know we have at least one trip through the byte loop */ EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) nop # .. E .. .. : br $31, $dirtyentry # L0 .. .. .. : L U U L /* Do the trailing byte loop load, then hop into the store part of the loop */ /* * A minimum of (33 - 7) bytes to do a quad at a time. * Based upon the usage context, it's worth the effort to unroll this loop * $0 - number of bytes to be moved * $4 - number of bytes to move as quadwords * $16 is current destination address * $17 is current source address */ $quadaligned: subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff nop # .. .. E .. nop # .. E .. .. blt $2, $onequad # U .. .. .. : U L U L /* * There is a significant assumption here that the source and destination * addresses differ by more than 32 bytes. In this particular case, a * sparsity of registers further bounds this to be a minimum of 8 bytes. * But if this isn't met, then the output result will be incorrect. * Furthermore, due to a lack of available registers, we really can't * unroll this to be an 8x loop (which would enable us to use the wh64 * instruction memory hint instruction). */ $unroll4: EXI( ldq $1,0($17) ) # .. .. .. L EXI( ldq $2,8($17) ) # .. .. L .. subq $4,32,$4 # .. E .. .. nop # E .. .. .. : U U L L addq $17,16,$17 # .. .. .. E EXO( stq $1,0($16) ) # .. .. L .. EXO( stq $2,8($16) ) # .. L .. .. subq $0,16,$0 # E .. .. .. : U L L U addq $16,16,$16 # .. .. .. E EXI( ldq $1,0($17) ) # .. .. L .. EXI( ldq $2,8($17) ) # .. L .. .. subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip? EXO( stq $1,0($16) ) # .. .. .. L EXO( stq $2,8($16) ) # .. .. L .. subq $0,16,$0 # .. E .. .. addq $17,16,$17 # E .. .. .. : U L L U nop # .. .. .. E nop # .. .. E .. addq $16,16,$16 # .. E .. .. bgt $3,$unroll4 # U .. .. .. : U L U L nop nop nop beq $4, $noquads $onequad: EXI( ldq $1,0($17) ) subq $4,8,$4 addq $17,8,$17 nop EXO( stq $1,0($16) ) subq $0,8,$0 addq $16,8,$16 bne $4,$onequad $noquads: nop nop nop beq $0,$zerolength /* * For small copies (or the tail of a larger copy), do a very simple byte loop. * There's no point in doing a lot of complex alignment calculations to try to * to quadword stuff for a small amount of data. * $0 - remaining number of bytes left to copy * $16 - current dest addr * $17 - current source addr */ $onebyteloop: EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) nop # .. E .. .. : nop # E .. .. .. : U L U L $dirtyentry: /* * the -1 is to compensate for the inc($16) done in a previous quadpack * which allows us zero dependencies within either quadpack in the loop */ EXO ( stb $2,-1($16) ) # .. .. .. L : addq $17,1,$17 # .. .. E .. : quadpack as the load subq $0,1,$0 # .. E .. .. : change count _after_ copy bgt $0,$onebyteloop # U .. .. .. : U L U L $zerolength: $exitin: $exitout: # Destination for exception recovery(?) nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. ret $31,($26),1 # L0 .. .. .. : L U L U .end __copy_user EXPORT_SYMBOL(__copy_user)
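The misaligned-quadword loop in __copy_user above ($misquad) assembles each unaligned 8-byte value from two aligned loads using extql/extqh and an OR. A little-endian C sketch of that merge follows; it only illustrates the shift-and-combine idea, with invented names, and is not the kernel routine.

#include <stdint.h>

/* Build one unaligned 8-byte value from the two aligned quadwords that
 * straddle it, given the byte offset into the first one (0..7).
 * Little-endian, as on Alpha.  The byte_off == 0 early return also
 * avoids an undefined shift by 64. */
static uint64_t load_unaligned_sketch(const uint64_t *aligned_base,
                                      unsigned int byte_off)
{
        uint64_t lo = aligned_base[0];
        uint64_t hi = aligned_base[1];

        if (byte_off == 0)
                return lo;
        return (lo >> (8 * byte_off)) | (hi << (8 * (8 - byte_off)));
}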
aixcc-public/challenge-001-exemplar-source
1,572
arch/alpha/lib/strncpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/strncpy.S * Contributed by Richard Henderson (rth@tamu.edu) * * Copy no more than COUNT bytes of the null-terminated string from * SRC to DST. If SRC does not cover all of COUNT, the balance is * zeroed. * * Or, rather, if the kernel cared about that weird ANSI quirk. This * version has cropped that bit o' nastiness as well as assuming that * __stxncpy is in range of a branch. */ #include <asm/export.h> .set noat .set noreorder .text .align 4 .globl strncpy .ent strncpy strncpy: .frame $30, 0, $26 .prologue 0 mov $16, $0 # set return value now beq $18, $zerolen unop bsr $23, __stxncpy # do the work of the copy unop bne $18, $multiword # do we have full words left? subq $24, 1, $3 # nope subq $27, 1, $4 or $3, $24, $3 # clear the bits between the last or $4, $27, $4 # written byte and the last byte in COUNT andnot $3, $4, $4 zap $1, $4, $1 stq_u $1, 0($16) ret .align 4 $multiword: subq $27, 1, $2 # clear the final bits in the prev word or $2, $27, $2 zapnot $1, $2, $1 subq $18, 1, $18 stq_u $1, 0($16) addq $16, 8, $16 unop beq $18, 1f nop unop nop blbc $18, 0f stq_u $31, 0($16) # zero one word subq $18, 1, $18 addq $16, 8, $16 beq $18, 1f 0: stq_u $31, 0($16) # zero two words subq $18, 2, $18 stq_u $31, 8($16) addq $16, 16, $16 bne $18, 0b 1: ldq_u $1, 0($16) # clear the leading bits in the final word subq $24, 1, $2 or $2, $24, $2 zap $1, $2, $1 stq_u $1, 0($16) $zerolen: ret .end strncpy EXPORT_SYMBOL(strncpy)
aixcc-public/challenge-001-exemplar-source
1,845
arch/alpha/boot/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/boot/head.S * * initial bootloader stuff.. */ #include <asm/pal.h> .set noreorder .globl __start .ent __start __start: br $29,2f 2: ldgp $29,0($29) jsr $26,start_kernel call_pal PAL_halt .end __start .align 5 .globl wrent .ent wrent wrent: .prologue 0 call_pal PAL_wrent ret ($26) .end wrent .align 5 .globl wrkgp .ent wrkgp wrkgp: .prologue 0 call_pal PAL_wrkgp ret ($26) .end wrkgp .align 5 .globl switch_to_osf_pal .ent switch_to_osf_pal switch_to_osf_pal: subq $30,128,$30 .frame $30,128,$26 stq $26,0($30) stq $1,8($30) stq $2,16($30) stq $3,24($30) stq $4,32($30) stq $5,40($30) stq $6,48($30) stq $7,56($30) stq $8,64($30) stq $9,72($30) stq $10,80($30) stq $11,88($30) stq $12,96($30) stq $13,104($30) stq $14,112($30) stq $15,120($30) .prologue 0 stq $30,0($17) /* save KSP in PCB */ bis $30,$30,$20 /* a4 = KSP */ br $17,1f ldq $26,0($30) ldq $1,8($30) ldq $2,16($30) ldq $3,24($30) ldq $4,32($30) ldq $5,40($30) ldq $6,48($30) ldq $7,56($30) ldq $8,64($30) ldq $9,72($30) ldq $10,80($30) ldq $11,88($30) ldq $12,96($30) ldq $13,104($30) ldq $14,112($30) ldq $15,120($30) addq $30,128,$30 ret ($26) 1: call_pal PAL_swppal .end switch_to_osf_pal .align 3 .globl tbi .ent tbi tbi: .prologue 0 call_pal PAL_tbi ret ($26) .end tbi .align 3 .globl halt .ent halt halt: .prologue 0 call_pal PAL_halt .end halt /* $16 - new stack page */ .align 3 .globl move_stack .ent move_stack move_stack: .prologue 0 lda $0, 0x1fff($31) and $0, $30, $1 /* Stack offset */ or $1, $16, $16 /* New stack pointer */ mov $30, $1 mov $16, $2 1: ldq $3, 0($1) /* Move the stack */ addq $1, 8, $1 stq $3, 0($2) and $0, $1, $4 addq $2, 8, $2 bne $4, 1b mov $16, $30 ret ($26) .end move_stack
aixcc-public/challenge-001-exemplar-source
1,307
arch/mips/power/hibernate_asm.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hibernation support specific for mips - temporary page tables
 *
 * Copyright (C) 2009 Lemote Inc.
 * Author: Hu Hongbing <huhb@lemote.com>
 *	   Wu Zhangjin <wuzhangjin@gmail.com>
 */
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/asm.h>

.text
LEAF(swsusp_arch_suspend)
	PTR_LA t0, saved_regs
	PTR_S ra, PT_R31(t0)
	PTR_S sp, PT_R29(t0)
	PTR_S fp, PT_R30(t0)
	PTR_S gp, PT_R28(t0)
	PTR_S s0, PT_R16(t0)
	PTR_S s1, PT_R17(t0)
	PTR_S s2, PT_R18(t0)
	PTR_S s3, PT_R19(t0)
	PTR_S s4, PT_R20(t0)
	PTR_S s5, PT_R21(t0)
	PTR_S s6, PT_R22(t0)
	PTR_S s7, PT_R23(t0)
	j swsusp_save
END(swsusp_arch_suspend)

LEAF(restore_image)
	PTR_L t0, restore_pblist
0:
	PTR_L t1, PBE_ADDRESS(t0)   /* source */
	PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */
	PTR_ADDU t3, t1, _PAGE_SIZE
1:
	REG_L t8, (t1)
	REG_S t8, (t2)
	PTR_ADDIU t1, t1, SZREG
	PTR_ADDIU t2, t2, SZREG
	bne t1, t3, 1b
	PTR_L t0, PBE_NEXT(t0)
	bnez t0, 0b

	PTR_LA t0, saved_regs
	PTR_L ra, PT_R31(t0)
	PTR_L sp, PT_R29(t0)
	PTR_L fp, PT_R30(t0)
	PTR_L gp, PT_R28(t0)
	PTR_L s0, PT_R16(t0)
	PTR_L s1, PT_R17(t0)
	PTR_L s2, PT_R18(t0)
	PTR_L s3, PT_R19(t0)
	PTR_L s4, PT_R20(t0)
	PTR_L s5, PT_R21(t0)
	PTR_L s6, PT_R22(t0)
	PTR_L s7, PT_R23(t0)
	PTR_LI v0, 0x0
	jr ra
END(restore_image)
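restore_image above walks restore_pblist, copying each saved page back to its original address before reloading the callee-saved registers. A C sketch of that walk is below; the struct mirrors the PBE_ADDRESS/PBE_ORIG_ADDRESS/PBE_NEXT offsets used in the asm (field names assumed to follow struct pbe in include/linux/suspend.h), and the 4 KB page size merely stands in for _PAGE_SIZE.

#include <string.h>

#define PAGE_SIZE_SKETCH 4096   /* illustrative; _PAGE_SIZE in the asm */

struct pbe_sketch {
        void *address;           /* PBE_ADDRESS: where the copy lives   */
        void *orig_address;      /* PBE_ORIG_ADDRESS: where it belongs  */
        struct pbe_sketch *next; /* PBE_NEXT                            */
};

/* Copy every saved page back to its original location. */
static void restore_pages_sketch(struct pbe_sketch *list)
{
        for (struct pbe_sketch *p = list; p; p = p->next)
                memcpy(p->orig_address, p->address, PAGE_SIZE_SKETCH);
}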
aixcc-public/challenge-001-exemplar-source
2,603
arch/mips/vdso/vdso.lds.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2015 Imagination Technologies * Author: Alex Smith <alex.smith@imgtec.com> */ #include <asm/sgidefs.h> #if _MIPS_SIM == _MIPS_SIM_ABI64 OUTPUT_FORMAT("elf64-tradlittlemips", "elf64-tradbigmips", "elf64-tradlittlemips") #elif _MIPS_SIM == _MIPS_SIM_NABI32 OUTPUT_FORMAT("elf32-ntradlittlemips", "elf32-ntradbigmips", "elf32-ntradlittlemips") #else OUTPUT_FORMAT("elf32-tradlittlemips", "elf32-tradbigmips", "elf32-tradlittlemips") #endif OUTPUT_ARCH(mips) SECTIONS { PROVIDE(_start = .); . = SIZEOF_HEADERS; /* * In order to retain compatibility with older toolchains we provide the * ABI flags section ourself. Newer assemblers will automatically * generate .MIPS.abiflags sections so we discard such input sections, * and then manually define our own section here. genvdso will patch * this section to have the correct name/type. */ .mips_abiflags : { *(.mips_abiflags) } :text :abiflags .reginfo : { *(.reginfo) } :text :reginfo .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note .text : { *(.text*) } :text PROVIDE (__etext = .); PROVIDE (_etext = .); PROVIDE (etext = .); .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .dynamic : { *(.dynamic) } :text :dynamic .rodata : { *(.rodata*) } :text _end = .; PROVIDE(end = .); /DISCARD/ : { *(.MIPS.abiflags) *(.gnu.attributes) *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } } PHDRS { /* * Provide a PT_MIPS_ABIFLAGS header to assign the ABI flags section * to. We can specify the header type directly here so no modification * is needed later on. */ abiflags 0x70000003; /* * The ABI flags header must exist directly after the PT_INTERP header, * so we must explicitly place the PT_MIPS_REGINFO header after it to * stop the linker putting one in at the start. */ reginfo 0x70000000; text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } VERSION { LINUX_2.6 { #ifndef CONFIG_MIPS_DISABLE_VDSO global: __vdso_clock_gettime; __vdso_gettimeofday; __vdso_clock_getres; #if _MIPS_SIM != _MIPS_SIM_ABI64 __vdso_clock_gettime64; #endif #endif local: *; }; }
aixcc-public/challenge-001-exemplar-source
1,603
arch/mips/vdso/elf.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2015 Imagination Technologies * Author: Alex Smith <alex.smith@imgtec.com> */ #include <asm/vdso/vdso.h> #include <asm/isa-rev.h> #include <linux/elfnote.h> #include <linux/version.h> ELFNOTE_START(Linux, 0, "a") .long LINUX_VERSION_CODE ELFNOTE_END /* * The .MIPS.abiflags section must be defined with the FP ABI flags set * to 'any' to be able to link with both old and new libraries. * Newer toolchains are capable of automatically generating this, but we want * to work with older toolchains as well. Therefore, we define the contents of * this section here (under different names), and then genvdso will patch * it to have the correct name and type. * * We base the .MIPS.abiflags section on preprocessor definitions rather than * CONFIG_* because we need to match the particular ABI we are building the * VDSO for. * * See https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking * for the .MIPS.abiflags section description. */ .section .mips_abiflags, "a" .align 3 __mips_abiflags: .hword 0 /* version */ .byte __mips /* isa_level */ /* isa_rev */ .byte MIPS_ISA_REV /* gpr_size */ #ifdef __mips64 .byte 2 /* AFL_REG_64 */ #else .byte 1 /* AFL_REG_32 */ #endif /* cpr1_size */ #if (MIPS_ISA_REV >= 6) || defined(__mips64) .byte 2 /* AFL_REG_64 */ #else .byte 1 /* AFL_REG_32 */ #endif .byte 0 /* cpr2_size (AFL_REG_NONE) */ .byte 0 /* fp_abi (Val_GNU_MIPS_ABI_FP_ANY) */ .word 0 /* isa_ext */ .word 0 /* ases */ .word 0 /* flags1 */ .word 0 /* flags2 */
aixcc-public/challenge-001-exemplar-source
7,780
arch/mips/dec/int-handler.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 1995, 1996, 1997 Paul M. Antoine and Harald Koerfgen * Copyright (C) 2000, 2001, 2002, 2003, 2005 Maciej W. Rozycki * * Written by Ralf Baechle and Andreas Busse, modified for DECstation * support by Paul Antoine and Harald Koerfgen. * * completely rewritten: * Copyright (C) 1998 Harald Koerfgen * * Rewritten extensively for controller-driven IRQ support * by Maciej W. Rozycki. */ #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/dec/interrupts.h> #include <asm/dec/ioasic_addrs.h> #include <asm/dec/ioasic_ints.h> #include <asm/dec/kn01.h> #include <asm/dec/kn02.h> #include <asm/dec/kn02xa.h> #include <asm/dec/kn03.h> #define KN02_CSR_BASE CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR) #define KN02XA_IOASIC_BASE CKSEG1ADDR(KN02XA_SLOT_BASE + IOASIC_IOCTL) #define KN03_IOASIC_BASE CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_IOCTL) .text .set noreorder /* * plat_irq_dispatch: Interrupt handler for DECstations * * We follow the model in the Indy interrupt code by David Miller, where he * says: a lot of complication here is taken away because: * * 1) We handle one interrupt and return, sitting in a loop * and moving across all the pending IRQ bits in the cause * register is _NOT_ the answer, the common case is one * pending IRQ so optimize in that direction. * * 2) We need not check against bits in the status register * IRQ mask, that would make this routine slow as hell. * * 3) Linux only thinks in terms of all IRQs on or all IRQs * off, nothing in between like BSD spl() brain-damage. * * Furthermore, the IRQs on the DECstations look basically (barring * software IRQs which we don't use at all) like... * * DS2100/3100's, aka kn01, aka Pmax: * * MIPS IRQ Source * -------- ------ * 0 Software (ignored) * 1 Software (ignored) * 2 SCSI * 3 Lance Ethernet * 4 DZ11 serial * 5 RTC * 6 Memory Controller & Video * 7 FPU * * DS5000/200, aka kn02, aka 3max: * * MIPS IRQ Source * -------- ------ * 0 Software (ignored) * 1 Software (ignored) * 2 TurboChannel * 3 RTC * 4 Reserved * 5 Memory Controller * 6 Reserved * 7 FPU * * DS5000/1xx's, aka kn02ba, aka 3min: * * MIPS IRQ Source * -------- ------ * 0 Software (ignored) * 1 Software (ignored) * 2 TurboChannel Slot 0 * 3 TurboChannel Slot 1 * 4 TurboChannel Slot 2 * 5 TurboChannel Slot 3 (ASIC) * 6 Halt button * 7 FPU/R4k timer * * DS5000/2x's, aka kn02ca, aka maxine: * * MIPS IRQ Source * -------- ------ * 0 Software (ignored) * 1 Software (ignored) * 2 Periodic Interrupt (100usec) * 3 RTC * 4 I/O write timeout * 5 TurboChannel (ASIC) * 6 Halt Keycode from Access.Bus keyboard (CTRL-ALT-ENTER) * 7 FPU/R4k timer * * DS5000/2xx's, aka kn03, aka 3maxplus: * * MIPS IRQ Source * -------- ------ * 0 Software (ignored) * 1 Software (ignored) * 2 System Board (ASIC) * 3 RTC * 4 Reserved * 5 Memory * 6 Halt Button * 7 FPU/R4k timer * * We handle the IRQ according to _our_ priority (see setup.c), * then we just return. If multiple IRQs are pending then we will * just take another exception, big deal. */ .align 5 NESTED(plat_irq_dispatch, PT_SIZE, ra) .set noreorder /* * Get pending Interrupts */ mfc0 t0,CP0_CAUSE # get pending interrupts mfc0 t1,CP0_STATUS #if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) lw t2,cpu_fpu_mask #endif andi t0,ST0_IM # CAUSE.CE may be non-zero! 
and t0,t1 # isolate allowed ones beqz t0,spurious #if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) and t2,t0 bnez t2,fpu # handle FPU immediately #endif /* * Find irq with highest priority */ # open coded PTR_LA t1, cpu_mask_nr_tbl #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) # open coded la t1, cpu_mask_nr_tbl lui t1, %hi(cpu_mask_nr_tbl) addiu t1, %lo(cpu_mask_nr_tbl) #else #error GCC `-msym32' option required for 64-bit DECstation builds #endif 1: lw t2,(t1) nop and t2,t0 beqz t2,1b addu t1,2*PTRSIZE # delay slot /* * Do the low-level stuff */ lw a0,(-PTRSIZE)(t1) nop bgez a0,handle_it # irq_nr >= 0? # irq_nr < 0: it is an address nop jr a0 # a trick to save a branch: lui t2,(KN03_IOASIC_BASE>>16)&0xffff # upper part of IOASIC Address /* * Handle "IRQ Controller" Interrupts * Masked Interrupts are still visible and have to be masked "by hand". */ FEXPORT(kn02_io_int) # 3max lui t0,(KN02_CSR_BASE>>16)&0xffff # get interrupt status and mask lw t0,(t0) nop andi t1,t0,KN02_IRQ_ALL b 1f srl t0,16 # shift interrupt mask FEXPORT(kn02xa_io_int) # 3min/maxine lui t2,(KN02XA_IOASIC_BASE>>16)&0xffff # upper part of IOASIC Address FEXPORT(kn03_io_int) # 3max+ (t2 loaded earlier) lw t0,IO_REG_SIR(t2) # get status: IOASIC sir lw t1,IO_REG_SIMR(t2) # get mask: IOASIC simr nop 1: and t0,t1 # mask out allowed ones beqz t0,spurious /* * Find irq with highest priority */ # open coded PTR_LA t1,asic_mask_nr_tbl #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) # open coded la t1, asic_mask_nr_tbl lui t1, %hi(asic_mask_nr_tbl) addiu t1, %lo(asic_mask_nr_tbl) #else #error GCC `-msym32' option required for 64-bit DECstation builds #endif 2: lw t2,(t1) nop and t2,t0 beq zero,t2,2b addu t1,2*PTRSIZE # delay slot /* * Do the low-level stuff */ lw a0,%lo(-PTRSIZE)(t1) nop bgez a0,handle_it # irq_nr >= 0? # irq_nr < 0: it is an address nop jr a0 nop # delay slot /* * Dispatch low-priority interrupts. We reconsider all status * bits again, which looks like a lose, but it makes the code * simple and O(log n), so it gets compensated. */ FEXPORT(cpu_all_int) # HALT, timers, software junk li a0,DEC_CPU_IRQ_BASE srl t0,CAUSEB_IP li t1,CAUSEF_IP>>CAUSEB_IP # mask b 1f li t2,4 # nr of bits / 2 FEXPORT(kn02_all_int) # impossible ? li a0,KN02_IRQ_BASE li t1,KN02_IRQ_ALL # mask b 1f li t2,4 # nr of bits / 2 FEXPORT(asic_all_int) # various I/O ASIC junk li a0,IO_IRQ_BASE li t1,IO_IRQ_ALL # mask b 1f li t2,8 # nr of bits / 2 /* * Dispatch DMA interrupts -- O(log n). */ FEXPORT(asic_dma_int) # I/O ASIC DMA events li a0,IO_IRQ_BASE+IO_INR_DMA srl t0,IO_INR_DMA li t1,IO_IRQ_DMA>>IO_INR_DMA # mask li t2,8 # nr of bits / 2 /* * Find irq with highest priority. * Highest irq number takes precedence. */ 1: srlv t3,t1,t2 2: xor t1,t3 and t3,t0,t1 beqz t3,3f nop move t0,t3 addu a0,t2 3: srl t2,1 bnez t2,2b srlv t3,t1,t2 handle_it: j dec_irq_dispatch nop #if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) fpu: lw t0,fpu_kstat_irq nop lw t1,(t0) nop addu t1,1 j handle_fpe_int sw t1,(t0) #endif spurious: j spurious_interrupt nop END(plat_irq_dispatch) /* * Generic unimplemented interrupt routines -- cpu_mask_nr_tbl * and asic_mask_nr_tbl are initialized to point all interrupts here. * The tables are then filled in by machine-specific initialisation * in dec_setup(). */ FEXPORT(dec_intr_unimplemented) move a1,t0 # cheats way of printing an arg! ASM_PANIC("Unimplemented cpu interrupt! CP0_CAUSE: 0x%08x"); FEXPORT(asic_intr_unimplemented) move a1,t0 # cheats way of printing an arg! 
ASM_PANIC("Unimplemented asic interrupt! ASIC ISR: 0x%08x");
aixcc-public/challenge-001-exemplar-source
3,855
arch/mips/kernel/head.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994, 1995 Waldorf Electronics * Written by Ralf Baechle and Andreas Busse * Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle * Copyright (C) 1996 Paul M. Antoine * Modified for DECStation and hence R3000 support by Paul M. Antoine * Further modifications by David S. Miller and Harald Koerfgen * Copyright (C) 1999 Silicon Graphics, Inc. * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. */ #include <linux/init.h> #include <linux/threads.h> #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/irqflags.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <kernel-entry-init.h> /* * For the moment disable interrupts, mark the kernel mode and * set ST0_KX so that the CPU does not spit fire when using * 64-bit addresses. A full initialization of the CPU's status * register is done later in per_cpu_trap_init(). */ .macro setup_c0_status set clr .set push mfc0 t0, CP0_STATUS or t0, ST0_KERNEL_CUMASK|\set|0x1f|\clr xor t0, 0x1f|\clr mtc0 t0, CP0_STATUS .set noreorder sll zero,3 # ehb .set pop .endm .macro setup_c0_status_pri #ifdef CONFIG_64BIT setup_c0_status ST0_KX 0 #else setup_c0_status 0 0 #endif .endm .macro setup_c0_status_sec #ifdef CONFIG_64BIT setup_c0_status ST0_KX ST0_BEV #else setup_c0_status 0 ST0_BEV #endif .endm #ifndef CONFIG_NO_EXCEPT_FILL /* * Reserved space for exception handlers. * Necessary for machines which link their kernels at KSEG0. */ .fill 0x400 #endif EXPORT(_stext) #ifdef CONFIG_BOOT_RAW /* * Give us a fighting chance of running if execution beings at the * kernel load address. This is needed because this platform does * not have a ELF loader yet. */ FEXPORT(__kernel_entry) j kernel_entry #endif /* CONFIG_BOOT_RAW */ __REF NESTED(kernel_entry, 16, sp) # kernel entry point kernel_entry_setup # cpu specific setup setup_c0_status_pri /* We might not get launched at the address the kernel is linked to, so we jump there. */ PTR_LA t0, 0f jr t0 0: PTR_LA t0, __bss_start # clear .bss LONG_S zero, (t0) PTR_LA t1, __bss_stop - LONGSIZE 1: PTR_ADDIU t0, LONGSIZE LONG_S zero, (t0) bne t0, t1, 1b LONG_S a0, fw_arg0 # firmware arguments LONG_S a1, fw_arg1 LONG_S a2, fw_arg2 LONG_S a3, fw_arg3 MTC0 zero, CP0_CONTEXT # clear context register #ifdef CONFIG_64BIT MTC0 zero, CP0_XCONTEXT #endif PTR_LA $28, init_thread_union /* Set the SP after an empty pt_regs. */ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE PTR_ADDU sp, $28 back_to_back_c0_hazard set_saved_sp sp, t0, t1 PTR_SUBU sp, 4 * SZREG # init stack pointer #ifdef CONFIG_RELOCATABLE /* Copy kernel and apply the relocations */ jal relocate_kernel /* Repoint the sp into the new kernel image */ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE PTR_ADDU sp, $28 set_saved_sp sp, t0, t1 PTR_SUBU sp, 4 * SZREG # init stack pointer /* * relocate_kernel returns the entry point either * in the relocated kernel or the original if for * some reason relocation failed - jump there now * with instruction hazard barrier because of the * newly sync'd icache. */ jr.hb v0 #else /* !CONFIG_RELOCATABLE */ j start_kernel #endif /* !CONFIG_RELOCATABLE */ END(kernel_entry) #ifdef CONFIG_SMP /* * SMP slave cpus entry point. Board specific code for bootstrap calls this * function after setting up the stack and gp registers. 
*/ NESTED(smp_bootstrap, 16, sp) smp_slave_setup setup_c0_status_sec j start_secondary END(smp_bootstrap) #endif /* CONFIG_SMP */
aixcc-public/challenge-001-exemplar-source
9,337
arch/mips/kernel/r4k_fpu.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle * * Multi-arch abstraction and asm macros for easier reading: * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * * Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000 MIPS Technologies, Inc. * Copyright (C) 1999, 2001 Silicon Graphics, Inc. */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/errno.h> #include <asm/export.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> /* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ #undef fp .macro EX insn, reg, src .set push SET_HARDFLOAT .set nomacro .ex\@: \insn \reg, \src .set pop .section __ex_table,"a" PTR_WD .ex\@, fault .previous .endm /* * Save a thread's fp context. */ LEAF(_save_fp) EXPORT_SYMBOL(_save_fp) #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6) mfc0 t0, CP0_STATUS #endif fpu_save_double a0 t0 t1 # clobbers t1 jr ra END(_save_fp) /* * Restore a thread's fp context. */ LEAF(_restore_fp) #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6) mfc0 t0, CP0_STATUS #endif fpu_restore_double a0 t0 t1 # clobbers t1 jr ra END(_restore_fp) #ifdef CONFIG_CPU_HAS_MSA /* * Save a thread's MSA vector context. */ LEAF(_save_msa) EXPORT_SYMBOL(_save_msa) msa_save_all a0 jr ra END(_save_msa) /* * Restore a thread's MSA vector context. */ LEAF(_restore_msa) msa_restore_all a0 jr ra END(_restore_msa) LEAF(_init_msa_upper) msa_init_all_upper jr ra END(_init_msa_upper) #endif .set noreorder /** * _save_fp_context() - save FP context from the FPU * @a0 - pointer to fpregs field of sigcontext * @a1 - pointer to fpc_csr field of sigcontext * * Save FP context, including the 32 FP data registers and the FP * control & status register, from the FPU to signal context. 
*/ LEAF(_save_fp_context) .set push SET_HARDFLOAT cfc1 t1, fcr31 .set pop #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6) .set push SET_HARDFLOAT #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) .set mips32r2 .set fp=64 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip storing odd if FR=0 nop #endif /* Store the 16 odd double precision registers */ EX sdc1 $f1, 8(a0) EX sdc1 $f3, 24(a0) EX sdc1 $f5, 40(a0) EX sdc1 $f7, 56(a0) EX sdc1 $f9, 72(a0) EX sdc1 $f11, 88(a0) EX sdc1 $f13, 104(a0) EX sdc1 $f15, 120(a0) EX sdc1 $f17, 136(a0) EX sdc1 $f19, 152(a0) EX sdc1 $f21, 168(a0) EX sdc1 $f23, 184(a0) EX sdc1 $f25, 200(a0) EX sdc1 $f27, 216(a0) EX sdc1 $f29, 232(a0) EX sdc1 $f31, 248(a0) 1: .set pop #endif .set push SET_HARDFLOAT /* Store the 16 even double precision registers */ EX sdc1 $f0, 0(a0) EX sdc1 $f2, 16(a0) EX sdc1 $f4, 32(a0) EX sdc1 $f6, 48(a0) EX sdc1 $f8, 64(a0) EX sdc1 $f10, 80(a0) EX sdc1 $f12, 96(a0) EX sdc1 $f14, 112(a0) EX sdc1 $f16, 128(a0) EX sdc1 $f18, 144(a0) EX sdc1 $f20, 160(a0) EX sdc1 $f22, 176(a0) EX sdc1 $f24, 192(a0) EX sdc1 $f26, 208(a0) EX sdc1 $f28, 224(a0) EX sdc1 $f30, 240(a0) EX sw t1, 0(a1) jr ra li v0, 0 # success .set pop END(_save_fp_context) /** * _restore_fp_context() - restore FP context to the FPU * @a0 - pointer to fpregs field of sigcontext * @a1 - pointer to fpc_csr field of sigcontext * * Restore FP context, including the 32 FP data registers and the FP * control & status register, from signal context to the FPU. */ LEAF(_restore_fp_context) EX lw t1, 0(a1) #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6) .set push SET_HARDFLOAT #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) .set mips32r2 .set fp=64 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip loading odd if FR=0 nop #endif EX ldc1 $f1, 8(a0) EX ldc1 $f3, 24(a0) EX ldc1 $f5, 40(a0) EX ldc1 $f7, 56(a0) EX ldc1 $f9, 72(a0) EX ldc1 $f11, 88(a0) EX ldc1 $f13, 104(a0) EX ldc1 $f15, 120(a0) EX ldc1 $f17, 136(a0) EX ldc1 $f19, 152(a0) EX ldc1 $f21, 168(a0) EX ldc1 $f23, 184(a0) EX ldc1 $f25, 200(a0) EX ldc1 $f27, 216(a0) EX ldc1 $f29, 232(a0) EX ldc1 $f31, 248(a0) 1: .set pop #endif .set push SET_HARDFLOAT EX ldc1 $f0, 0(a0) EX ldc1 $f2, 16(a0) EX ldc1 $f4, 32(a0) EX ldc1 $f6, 48(a0) EX ldc1 $f8, 64(a0) EX ldc1 $f10, 80(a0) EX ldc1 $f12, 96(a0) EX ldc1 $f14, 112(a0) EX ldc1 $f16, 128(a0) EX ldc1 $f18, 144(a0) EX ldc1 $f20, 160(a0) EX ldc1 $f22, 176(a0) EX ldc1 $f24, 192(a0) EX ldc1 $f26, 208(a0) EX ldc1 $f28, 224(a0) EX ldc1 $f30, 240(a0) ctc1 t1, fcr31 .set pop jr ra li v0, 0 # success END(_restore_fp_context) #ifdef CONFIG_CPU_HAS_MSA .macro op_one_wr op, idx, base .align 4 \idx: \op \idx, 0, \base jr ra nop .endm .macro op_msa_wr name, op LEAF(\name) .set push .set noreorder sll t0, a0, 4 PTR_LA t1, 0f PTR_ADDU t0, t0, t1 jr t0 nop op_one_wr \op, 0, a1 op_one_wr \op, 1, a1 op_one_wr \op, 2, a1 op_one_wr \op, 3, a1 op_one_wr \op, 4, a1 op_one_wr \op, 5, a1 op_one_wr \op, 6, a1 op_one_wr \op, 7, a1 op_one_wr \op, 8, a1 op_one_wr \op, 9, a1 op_one_wr \op, 10, a1 op_one_wr \op, 11, a1 op_one_wr \op, 12, a1 op_one_wr \op, 13, a1 op_one_wr \op, 14, a1 op_one_wr \op, 15, a1 op_one_wr \op, 16, a1 op_one_wr \op, 17, a1 op_one_wr \op, 18, a1 op_one_wr \op, 19, a1 op_one_wr \op, 20, a1 op_one_wr \op, 21, a1 op_one_wr \op, 22, a1 op_one_wr \op, 23, a1 op_one_wr \op, 24, a1 op_one_wr \op, 25, a1 op_one_wr \op, 26, a1 op_one_wr \op, 27, a1 
op_one_wr \op, 28, a1 op_one_wr \op, 29, a1 op_one_wr \op, 30, a1 op_one_wr \op, 31, a1 .set pop END(\name) .endm op_msa_wr read_msa_wr_b, st_b op_msa_wr read_msa_wr_h, st_h op_msa_wr read_msa_wr_w, st_w op_msa_wr read_msa_wr_d, st_d op_msa_wr write_msa_wr_b, ld_b op_msa_wr write_msa_wr_h, ld_h op_msa_wr write_msa_wr_w, ld_w op_msa_wr write_msa_wr_d, ld_d #endif /* CONFIG_CPU_HAS_MSA */ #ifdef CONFIG_CPU_HAS_MSA .macro save_msa_upper wr, off, base .set push .set noat #ifdef CONFIG_64BIT copy_s_d \wr, 1 EX sd $1, \off(\base) #elif defined(CONFIG_CPU_LITTLE_ENDIAN) copy_s_w \wr, 2 EX sw $1, \off(\base) copy_s_w \wr, 3 EX sw $1, (\off+4)(\base) #else /* CONFIG_CPU_BIG_ENDIAN */ copy_s_w \wr, 2 EX sw $1, (\off+4)(\base) copy_s_w \wr, 3 EX sw $1, \off(\base) #endif .set pop .endm LEAF(_save_msa_all_upper) save_msa_upper 0, 0x00, a0 save_msa_upper 1, 0x08, a0 save_msa_upper 2, 0x10, a0 save_msa_upper 3, 0x18, a0 save_msa_upper 4, 0x20, a0 save_msa_upper 5, 0x28, a0 save_msa_upper 6, 0x30, a0 save_msa_upper 7, 0x38, a0 save_msa_upper 8, 0x40, a0 save_msa_upper 9, 0x48, a0 save_msa_upper 10, 0x50, a0 save_msa_upper 11, 0x58, a0 save_msa_upper 12, 0x60, a0 save_msa_upper 13, 0x68, a0 save_msa_upper 14, 0x70, a0 save_msa_upper 15, 0x78, a0 save_msa_upper 16, 0x80, a0 save_msa_upper 17, 0x88, a0 save_msa_upper 18, 0x90, a0 save_msa_upper 19, 0x98, a0 save_msa_upper 20, 0xa0, a0 save_msa_upper 21, 0xa8, a0 save_msa_upper 22, 0xb0, a0 save_msa_upper 23, 0xb8, a0 save_msa_upper 24, 0xc0, a0 save_msa_upper 25, 0xc8, a0 save_msa_upper 26, 0xd0, a0 save_msa_upper 27, 0xd8, a0 save_msa_upper 28, 0xe0, a0 save_msa_upper 29, 0xe8, a0 save_msa_upper 30, 0xf0, a0 save_msa_upper 31, 0xf8, a0 jr ra li v0, 0 END(_save_msa_all_upper) .macro restore_msa_upper wr, off, base .set push .set noat #ifdef CONFIG_64BIT EX ld $1, \off(\base) insert_d \wr, 1 #elif defined(CONFIG_CPU_LITTLE_ENDIAN) EX lw $1, \off(\base) insert_w \wr, 2 EX lw $1, (\off+4)(\base) insert_w \wr, 3 #else /* CONFIG_CPU_BIG_ENDIAN */ EX lw $1, (\off+4)(\base) insert_w \wr, 2 EX lw $1, \off(\base) insert_w \wr, 3 #endif .set pop .endm LEAF(_restore_msa_all_upper) restore_msa_upper 0, 0x00, a0 restore_msa_upper 1, 0x08, a0 restore_msa_upper 2, 0x10, a0 restore_msa_upper 3, 0x18, a0 restore_msa_upper 4, 0x20, a0 restore_msa_upper 5, 0x28, a0 restore_msa_upper 6, 0x30, a0 restore_msa_upper 7, 0x38, a0 restore_msa_upper 8, 0x40, a0 restore_msa_upper 9, 0x48, a0 restore_msa_upper 10, 0x50, a0 restore_msa_upper 11, 0x58, a0 restore_msa_upper 12, 0x60, a0 restore_msa_upper 13, 0x68, a0 restore_msa_upper 14, 0x70, a0 restore_msa_upper 15, 0x78, a0 restore_msa_upper 16, 0x80, a0 restore_msa_upper 17, 0x88, a0 restore_msa_upper 18, 0x90, a0 restore_msa_upper 19, 0x98, a0 restore_msa_upper 20, 0xa0, a0 restore_msa_upper 21, 0xa8, a0 restore_msa_upper 22, 0xb0, a0 restore_msa_upper 23, 0xb8, a0 restore_msa_upper 24, 0xc0, a0 restore_msa_upper 25, 0xc8, a0 restore_msa_upper 26, 0xd0, a0 restore_msa_upper 27, 0xd8, a0 restore_msa_upper 28, 0xe0, a0 restore_msa_upper 29, 0xe8, a0 restore_msa_upper 30, 0xf0, a0 restore_msa_upper 31, 0xf8, a0 jr ra li v0, 0 END(_restore_msa_all_upper) #endif /* CONFIG_CPU_HAS_MSA */ .set reorder .type fault, @function .ent fault fault: li v0, -EFAULT # failure jr ra .end fault
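op_msa_wr above works around the fact that the MSA vector-register number must be encoded in the instruction: it emits 32 identical 16-byte stubs and computes a jump to stub[index]. A loose C analogue of that table-of-stubs dispatch is sketched below; the handler is an empty stand-in (the real stubs issue st.b/ld.b and friends), and the [0 ... 31] range initializer is a GCC/Clang extension of the kind the kernel already relies on.

typedef void (*wr_handler_sketch_t)(void *buf);

static void handle_wr_stub(void *buf)
{
        (void)buf;      /* one per vector register in the asm */
}

static wr_handler_sketch_t wr_table_sketch[32] = {
        [0 ... 31] = handle_wr_stub,    /* GNU range-designated initializer */
};

/* Dispatch on the register index, like "jr t0" into the stub table. */
static void read_msa_wr_sketch(unsigned int wr_index, void *buf)
{
        if (wr_index < 32)
                wr_table_sketch[wr_index](buf);
}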
aixcc-public/challenge-001-exemplar-source
2,907
arch/mips/kernel/r2300_fpu.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996, 1998 by Ralf Baechle * * Multi-arch abstraction and asm macros for easier reading: * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * * Further modifications to make this work: * Copyright (c) 1998 Harald Koerfgen */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/errno.h> #include <asm/export.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> #define EX(a,b) \ 9: a,##b; \ .section __ex_table,"a"; \ PTR_WD 9b,fault; \ .previous #define EX2(a,b) \ 9: a,##b; \ .section __ex_table,"a"; \ PTR_WD 9b,fault; \ PTR_WD 9b+4,fault; \ .previous .set mips1 /* * Save a thread's fp context. */ LEAF(_save_fp) EXPORT_SYMBOL(_save_fp) fpu_save_single a0, t1 # clobbers t1 jr ra END(_save_fp) /* * Restore a thread's fp context. */ LEAF(_restore_fp) fpu_restore_single a0, t1 # clobbers t1 jr ra END(_restore_fp) .set noreorder /** * _save_fp_context() - save FP context from the FPU * @a0 - pointer to fpregs field of sigcontext * @a1 - pointer to fpc_csr field of sigcontext * * Save FP context, including the 32 FP data registers and the FP * control & status register, from the FPU to signal context. */ LEAF(_save_fp_context) .set push SET_HARDFLOAT li v0, 0 # assume success cfc1 t1, fcr31 EX2(s.d $f0, 0(a0)) EX2(s.d $f2, 16(a0)) EX2(s.d $f4, 32(a0)) EX2(s.d $f6, 48(a0)) EX2(s.d $f8, 64(a0)) EX2(s.d $f10, 80(a0)) EX2(s.d $f12, 96(a0)) EX2(s.d $f14, 112(a0)) EX2(s.d $f16, 128(a0)) EX2(s.d $f18, 144(a0)) EX2(s.d $f20, 160(a0)) EX2(s.d $f22, 176(a0)) EX2(s.d $f24, 192(a0)) EX2(s.d $f26, 208(a0)) EX2(s.d $f28, 224(a0)) EX2(s.d $f30, 240(a0)) jr ra EX(sw t1, (a1)) .set pop END(_save_fp_context) /** * _restore_fp_context() - restore FP context to the FPU * @a0 - pointer to fpregs field of sigcontext * @a1 - pointer to fpc_csr field of sigcontext * * Restore FP context, including the 32 FP data registers and the FP * control & status register, from signal context to the FPU. */ LEAF(_restore_fp_context) .set push SET_HARDFLOAT li v0, 0 # assume success EX(lw t0, (a1)) EX2(l.d $f0, 0(a0)) EX2(l.d $f2, 16(a0)) EX2(l.d $f4, 32(a0)) EX2(l.d $f6, 48(a0)) EX2(l.d $f8, 64(a0)) EX2(l.d $f10, 80(a0)) EX2(l.d $f12, 96(a0)) EX2(l.d $f14, 112(a0)) EX2(l.d $f16, 128(a0)) EX2(l.d $f18, 144(a0)) EX2(l.d $f20, 160(a0)) EX2(l.d $f22, 176(a0)) EX2(l.d $f24, 192(a0)) EX2(l.d $f26, 208(a0)) EX2(l.d $f28, 224(a0)) EX2(l.d $f30, 240(a0)) jr ra ctc1 t0, fcr31 .set pop END(_restore_fp_context) .set reorder .type fault, @function .ent fault fault: li v0, -EFAULT jr ra .end fault
aixcc-public/challenge-001-exemplar-source
7,181
arch/mips/kernel/bmips_vec.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com) * * Reset/NMI/re-entry vectors for BMIPS processors */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> #include <asm/cpu.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/addrspace.h> #include <asm/hazards.h> #include <asm/bmips.h> .macro BARRIER .set mips32 _ssnop _ssnop _ssnop .set mips0 .endm /*********************************************************************** * Alternate CPU1 startup vector for BMIPS4350 * * On some systems the bootloader has already started CPU1 and configured * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is * triggered by the SW1 interrupt. If that is the case we try to move * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380. ***********************************************************************/ LEAF(bmips_smp_movevec) la k0, 1f li k1, CKSEG1 or k0, k1 jr k0 1: /* clear IV, pending IPIs */ mtc0 zero, CP0_CAUSE /* re-enable IRQs to wait for SW1 */ li k0, ST0_IE | ST0_BEV | STATUSF_IP1 mtc0 k0, CP0_STATUS /* set up CPU1 CBR; move BASE to 0xa000_0000 */ li k0, 0xff400000 mtc0 k0, $22, 6 /* set up relocation vector address based on thread ID */ mfc0 k1, $22, 3 srl k1, 16 andi k1, 0x8000 or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0 or k0, k1 li k1, 0xa0080000 sw k1, 0(k0) /* wait here for SW1 interrupt from bmips_boot_secondary() */ wait la k0, bmips_reset_nmi_vec li k1, CKSEG1 or k0, k1 jr k0 END(bmips_smp_movevec) /*********************************************************************** * Reset/NMI vector * For BMIPS processors that can relocate their exception vectors, this * entire function gets copied to 0x8000_0000. ***********************************************************************/ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp) .set push .set noat .align 4 #ifdef CONFIG_SMP /* if the NMI bit is clear, assume this is a CPU1 reset instead */ li k1, (1 << 19) mfc0 k0, CP0_STATUS and k0, k1 beqz k0, soft_reset #if defined(CONFIG_CPU_BMIPS5000) mfc0 k0, CP0_PRID li k1, PRID_IMP_BMIPS5000 /* mask with PRID_IMP_BMIPS5000 to cover both variants */ andi k0, PRID_IMP_BMIPS5000 bne k0, k1, 1f /* if we're not on core 0, this must be the SMP boot signal */ li k1, (3 << 25) mfc0 k0, $22 and k0, k1 bnez k0, bmips_smp_entry 1: #endif /* CONFIG_CPU_BMIPS5000 */ #endif /* CONFIG_SMP */ /* nope, it's just a regular NMI */ SAVE_ALL move a0, sp /* clear EXL, ERL, BEV so that TLB refills still work */ mfc0 k0, CP0_STATUS li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE or k0, k1 xor k0, k1 mtc0 k0, CP0_STATUS BARRIER /* jump to the NMI handler function */ la k0, nmi_handler jr k0 RESTORE_ALL .set arch=r4000 eret #ifdef CONFIG_SMP soft_reset: #if defined(CONFIG_CPU_BMIPS5000) mfc0 k0, CP0_PRID andi k0, 0xff00 li k1, PRID_IMP_BMIPS5200 bne k0, k1, bmips_smp_entry /* if running on TP 1, jump to bmips_smp_entry */ mfc0 k0, $22 li k1, (1 << 24) and k1, k0 bnez k1, bmips_smp_entry nop /* * running on TP0, can not be core 0 (the boot core). * Check for soft reset. Indicates a warm boot */ mfc0 k0, $12 li k1, (1 << 20) and k0, k1 beqz k0, bmips_smp_entry /* * Warm boot. 
* Cache init is only done on TP0 */ la k0, bmips_5xxx_init jalr k0 nop b bmips_smp_entry nop #endif /*********************************************************************** * CPU1 reset vector (used for the initial boot only) * This is still part of bmips_reset_nmi_vec(). ***********************************************************************/ bmips_smp_entry: /* set up CP0 STATUS; enable FPU */ li k0, 0x30000000 mtc0 k0, CP0_STATUS BARRIER /* set local CP0 CONFIG to make kseg0 cacheable, write-back */ mfc0 k0, CP0_CONFIG ori k0, 0x07 xori k0, 0x04 mtc0 k0, CP0_CONFIG mfc0 k0, CP0_PRID andi k0, 0xff00 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) li k1, PRID_IMP_BMIPS43XX bne k0, k1, 2f /* initialize CPU1's local I-cache */ li k0, 0x80000000 li k1, 0x80010000 mtc0 zero, $28 mtc0 zero, $28, 1 BARRIER 1: cache Index_Store_Tag_I, 0(k0) addiu k0, 16 bne k0, k1, 1b b 3f 2: #endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */ #if defined(CONFIG_CPU_BMIPS5000) /* mask with PRID_IMP_BMIPS5000 to cover both variants */ li k1, PRID_IMP_BMIPS5000 andi k0, PRID_IMP_BMIPS5000 bne k0, k1, 3f /* set exception vector base */ la k0, ebase lw k0, 0(k0) mtc0 k0, $15, 1 BARRIER #endif /* CONFIG_CPU_BMIPS5000 */ 3: /* jump back to kseg0 in case we need to remap the kseg1 area */ la k0, 1f jr k0 1: la k0, bmips_enable_xks01 jalr k0 /* use temporary stack to set up upper memory TLB */ li sp, BMIPS_WARM_RESTART_VEC la k0, plat_wired_tlb_setup jalr k0 /* switch to permanent stack and continue booting */ .global bmips_secondary_reentry bmips_secondary_reentry: la k0, bmips_smp_boot_sp lw sp, 0(k0) la k0, bmips_smp_boot_gp lw gp, 0(k0) la k0, start_secondary jr k0 #endif /* CONFIG_SMP */ .align 4 .global bmips_reset_nmi_vec_end bmips_reset_nmi_vec_end: END(bmips_reset_nmi_vec) .set pop /*********************************************************************** * CPU1 warm restart vector (used for second and subsequent boots). * Also used for S2 standby recovery (PM). * This entire function gets copied to (BMIPS_WARM_RESTART_VEC) ***********************************************************************/ LEAF(bmips_smp_int_vec) .align 4 mfc0 k0, CP0_STATUS ori k0, 0x01 xori k0, 0x01 mtc0 k0, CP0_STATUS eret .align 4 .global bmips_smp_int_vec_end bmips_smp_int_vec_end: END(bmips_smp_int_vec) /*********************************************************************** * XKS01 support * Certain CPUs support extending kseg0 to 1024MB. ***********************************************************************/ LEAF(bmips_enable_xks01) #if defined(CONFIG_XKS01) mfc0 t0, CP0_PRID andi t2, t0, 0xff00 #if defined(CONFIG_CPU_BMIPS4380) li t1, PRID_IMP_BMIPS43XX bne t2, t1, 1f andi t0, 0xff addiu t1, t0, -PRID_REV_BMIPS4380_HI bgtz t1, 2f addiu t0, -PRID_REV_BMIPS4380_LO bltz t0, 2f mfc0 t0, $22, 3 li t1, 0x1ff0 li t2, (1 << 12) | (1 << 9) or t0, t1 xor t0, t1 or t0, t2 mtc0 t0, $22, 3 BARRIER b 2f 1: #endif /* CONFIG_CPU_BMIPS4380 */ #if defined(CONFIG_CPU_BMIPS5000) li t1, PRID_IMP_BMIPS5000 /* mask with PRID_IMP_BMIPS5000 to cover both variants */ andi t2, PRID_IMP_BMIPS5000 bne t2, t1, 2f mfc0 t0, $22, 5 li t1, 0x01ff li t2, (1 << 8) | (1 << 5) or t0, t1 xor t0, t1 or t0, t2 mtc0 t0, $22, 5 BARRIER #endif /* CONFIG_CPU_BMIPS5000 */ 2: #endif /* defined(CONFIG_XKS01) */ jr ra END(bmips_enable_xks01)
aixcc-public/challenge-001-exemplar-source
14,945
arch/mips/kernel/octeon_switch.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * Copyright (C) 1994, 1995, 1996, by Andreas Busse * Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 2000 MIPS Technologies, Inc. * written by Carsten Langgaard, carstenl@mips.com */ #include <asm/asm.h> #include <asm/export.h> #include <asm/asm-offsets.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) */ .align 7 LEAF(resume) .set arch=octeon mfc0 t1, CP0_STATUS LONG_S t1, THREAD_STATUS(a0) cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ dmfc0 t0, $11,7 /* CvmMemCtl */ bbit0 t0, 6, 3f /* Is user access enabled? */ /* Store the CVMSEG state */ /* Extract the size of CVMSEG */ andi t0, 0x3f /* Multiply * (cache line size/sizeof(long)/2) */ sll t0, 7-LONGLOG-1 li t1, -32768 /* Base address of CVMSEG */ LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */ synciobdma 2: .set noreorder LONG_L t8, 0(t1) /* Load from CVMSEG */ subu t0, 1 /* Decrement loop var */ LONG_L t9, LONGSIZE(t1)/* Load from CVMSEG */ LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */ LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */ LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */ bnez t0, 2b /* Loop until we've copied it all */ LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */ .set reorder /* Disable access to CVMSEG */ dmfc0 t0, $11,7 /* CvmMemCtl */ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ dmtc0 t0, $11,7 /* CvmMemCtl */ #endif 3: #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) LONG_S t9, 0(t8) #endif /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. */ move $28, a2 cpu_restore_nonscratch a1 PTR_ADDU t0, $28, _THREAD_SIZE - 32 set_saved_sp t0, t1, t2 mfc0 t1, CP0_STATUS /* Do we really need this? */ li a3, 0xff01 and t1, a3 LONG_L a2, THREAD_STATUS(a1) nor a3, $0, a3 and a2, a3 or a2, t1 mtc0 a2, CP0_STATUS move v0, a0 jr ra END(resume) /* * void octeon_cop2_save(struct octeon_cop2_state *a0) */ .align 7 .set push .set noreorder LEAF(octeon_cop2_save) dmfc0 t9, $9,7 /* CvmCtl register. 
*/ /* Save the COP2 CRC state */ dmfc2 t0, 0x0201 dmfc2 t1, 0x0202 dmfc2 t2, 0x0200 sd t0, OCTEON_CP2_CRC_IV(a0) sd t1, OCTEON_CP2_CRC_LENGTH(a0) /* Skip next instructions if CvmCtl[NODFA_CP2] set */ bbit1 t9, 28, 1f sd t2, OCTEON_CP2_CRC_POLY(a0) /* Save the LLM state */ dmfc2 t0, 0x0402 dmfc2 t1, 0x040A sd t0, OCTEON_CP2_LLM_DAT(a0) 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ sd t1, OCTEON_CP2_LLM_DAT+8(a0) /* Save the COP2 crypto state */ /* this part is mostly common to both pass 1 and later revisions */ dmfc2 t0, 0x0084 dmfc2 t1, 0x0080 dmfc2 t2, 0x0081 dmfc2 t3, 0x0082 sd t0, OCTEON_CP2_3DES_IV(a0) dmfc2 t0, 0x0088 sd t1, OCTEON_CP2_3DES_KEY(a0) dmfc2 t1, 0x0111 /* only necessary for pass 1 */ sd t2, OCTEON_CP2_3DES_KEY+8(a0) dmfc2 t2, 0x0102 sd t3, OCTEON_CP2_3DES_KEY+16(a0) dmfc2 t3, 0x0103 sd t0, OCTEON_CP2_3DES_RESULT(a0) dmfc2 t0, 0x0104 sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */ dmfc2 t1, 0x0105 sd t2, OCTEON_CP2_AES_IV(a0) dmfc2 t2, 0x0106 sd t3, OCTEON_CP2_AES_IV+8(a0) dmfc2 t3, 0x0107 sd t0, OCTEON_CP2_AES_KEY(a0) dmfc2 t0, 0x0110 sd t1, OCTEON_CP2_AES_KEY+8(a0) dmfc2 t1, 0x0100 sd t2, OCTEON_CP2_AES_KEY+16(a0) dmfc2 t2, 0x0101 sd t3, OCTEON_CP2_AES_KEY+24(a0) mfc0 v0, $15,0 /* Get the processor ID register */ sd t0, OCTEON_CP2_AES_KEYLEN(a0) li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ sd t1, OCTEON_CP2_AES_RESULT(a0) /* Skip to the Pass1 version of the remainder of the COP2 state */ beq v0, v1, 2f sd t2, OCTEON_CP2_AES_RESULT+8(a0) /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ dmfc2 t1, 0x0240 dmfc2 t2, 0x0241 ori v1, v1, 0x9500 /* lowest OCTEON III PrId*/ dmfc2 t3, 0x0242 subu v1, v0, v1 /* prid - lowest OCTEON III PrId */ dmfc2 t0, 0x0243 sd t1, OCTEON_CP2_HSH_DATW(a0) dmfc2 t1, 0x0244 sd t2, OCTEON_CP2_HSH_DATW+8(a0) dmfc2 t2, 0x0245 sd t3, OCTEON_CP2_HSH_DATW+16(a0) dmfc2 t3, 0x0246 sd t0, OCTEON_CP2_HSH_DATW+24(a0) dmfc2 t0, 0x0247 sd t1, OCTEON_CP2_HSH_DATW+32(a0) dmfc2 t1, 0x0248 sd t2, OCTEON_CP2_HSH_DATW+40(a0) dmfc2 t2, 0x0249 sd t3, OCTEON_CP2_HSH_DATW+48(a0) dmfc2 t3, 0x024A sd t0, OCTEON_CP2_HSH_DATW+56(a0) dmfc2 t0, 0x024B sd t1, OCTEON_CP2_HSH_DATW+64(a0) dmfc2 t1, 0x024C sd t2, OCTEON_CP2_HSH_DATW+72(a0) dmfc2 t2, 0x024D sd t3, OCTEON_CP2_HSH_DATW+80(a0) dmfc2 t3, 0x024E sd t0, OCTEON_CP2_HSH_DATW+88(a0) dmfc2 t0, 0x0250 sd t1, OCTEON_CP2_HSH_DATW+96(a0) dmfc2 t1, 0x0251 sd t2, OCTEON_CP2_HSH_DATW+104(a0) dmfc2 t2, 0x0252 sd t3, OCTEON_CP2_HSH_DATW+112(a0) dmfc2 t3, 0x0253 sd t0, OCTEON_CP2_HSH_IVW(a0) dmfc2 t0, 0x0254 sd t1, OCTEON_CP2_HSH_IVW+8(a0) dmfc2 t1, 0x0255 sd t2, OCTEON_CP2_HSH_IVW+16(a0) dmfc2 t2, 0x0256 sd t3, OCTEON_CP2_HSH_IVW+24(a0) dmfc2 t3, 0x0257 sd t0, OCTEON_CP2_HSH_IVW+32(a0) dmfc2 t0, 0x0258 sd t1, OCTEON_CP2_HSH_IVW+40(a0) dmfc2 t1, 0x0259 sd t2, OCTEON_CP2_HSH_IVW+48(a0) dmfc2 t2, 0x025E sd t3, OCTEON_CP2_HSH_IVW+56(a0) dmfc2 t3, 0x025A sd t0, OCTEON_CP2_GFM_MULT(a0) dmfc2 t0, 0x025B sd t1, OCTEON_CP2_GFM_MULT+8(a0) sd t2, OCTEON_CP2_GFM_POLY(a0) sd t3, OCTEON_CP2_GFM_RESULT(a0) bltz v1, 4f sd t0, OCTEON_CP2_GFM_RESULT+8(a0) /* OCTEON III things*/ dmfc2 t0, 0x024F dmfc2 t1, 0x0050 sd t0, OCTEON_CP2_SHA3(a0) sd t1, OCTEON_CP2_SHA3+8(a0) 4: jr ra nop 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ dmfc2 t3, 0x0040 dmfc2 t0, 0x0041 dmfc2 t1, 0x0042 dmfc2 t2, 0x0043 sd t3, OCTEON_CP2_HSH_DATW(a0) dmfc2 t3, 0x0044 sd t0, OCTEON_CP2_HSH_DATW+8(a0) dmfc2 t0, 0x0045 sd t1, OCTEON_CP2_HSH_DATW+16(a0) dmfc2 t1, 0x0046 sd t2, OCTEON_CP2_HSH_DATW+24(a0) dmfc2 t2, 0x0048 sd t3, 
OCTEON_CP2_HSH_DATW+32(a0) dmfc2 t3, 0x0049 sd t0, OCTEON_CP2_HSH_DATW+40(a0) dmfc2 t0, 0x004A sd t1, OCTEON_CP2_HSH_DATW+48(a0) sd t2, OCTEON_CP2_HSH_IVW(a0) sd t3, OCTEON_CP2_HSH_IVW+8(a0) sd t0, OCTEON_CP2_HSH_IVW+16(a0) 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ jr ra nop END(octeon_cop2_save) .set pop /* * void octeon_cop2_restore(struct octeon_cop2_state *a0) */ .align 7 .set push .set noreorder LEAF(octeon_cop2_restore) /* First cache line was prefetched before the call */ pref 4, 128(a0) dmfc0 t9, $9,7 /* CvmCtl register. */ pref 4, 256(a0) ld t0, OCTEON_CP2_CRC_IV(a0) pref 4, 384(a0) ld t1, OCTEON_CP2_CRC_LENGTH(a0) ld t2, OCTEON_CP2_CRC_POLY(a0) /* Restore the COP2 CRC state */ dmtc2 t0, 0x0201 dmtc2 t1, 0x1202 bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */ dmtc2 t2, 0x4200 /* Restore the LLM state */ ld t0, OCTEON_CP2_LLM_DAT(a0) ld t1, OCTEON_CP2_LLM_DAT+8(a0) dmtc2 t0, 0x0402 dmtc2 t1, 0x040A 2: bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */ nop /* Restore the COP2 crypto state common to pass 1 and pass 2 */ ld t0, OCTEON_CP2_3DES_IV(a0) ld t1, OCTEON_CP2_3DES_KEY(a0) ld t2, OCTEON_CP2_3DES_KEY+8(a0) dmtc2 t0, 0x0084 ld t0, OCTEON_CP2_3DES_KEY+16(a0) dmtc2 t1, 0x0080 ld t1, OCTEON_CP2_3DES_RESULT(a0) dmtc2 t2, 0x0081 ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */ dmtc2 t0, 0x0082 ld t0, OCTEON_CP2_AES_IV(a0) dmtc2 t1, 0x0098 ld t1, OCTEON_CP2_AES_IV+8(a0) dmtc2 t2, 0x010A /* only really needed for pass 1 */ ld t2, OCTEON_CP2_AES_KEY(a0) dmtc2 t0, 0x0102 ld t0, OCTEON_CP2_AES_KEY+8(a0) dmtc2 t1, 0x0103 ld t1, OCTEON_CP2_AES_KEY+16(a0) dmtc2 t2, 0x0104 ld t2, OCTEON_CP2_AES_KEY+24(a0) dmtc2 t0, 0x0105 ld t0, OCTEON_CP2_AES_KEYLEN(a0) dmtc2 t1, 0x0106 ld t1, OCTEON_CP2_AES_RESULT(a0) dmtc2 t2, 0x0107 ld t2, OCTEON_CP2_AES_RESULT+8(a0) mfc0 t3, $15,0 /* Get the processor ID register */ dmtc2 t0, 0x0110 li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ dmtc2 t1, 0x0100 bne v0, t3, 3f /* Skip the next stuff for non-pass1 */ dmtc2 t2, 0x0101 /* this code is specific for pass 1 */ ld t0, OCTEON_CP2_HSH_DATW(a0) ld t1, OCTEON_CP2_HSH_DATW+8(a0) ld t2, OCTEON_CP2_HSH_DATW+16(a0) dmtc2 t0, 0x0040 ld t0, OCTEON_CP2_HSH_DATW+24(a0) dmtc2 t1, 0x0041 ld t1, OCTEON_CP2_HSH_DATW+32(a0) dmtc2 t2, 0x0042 ld t2, OCTEON_CP2_HSH_DATW+40(a0) dmtc2 t0, 0x0043 ld t0, OCTEON_CP2_HSH_DATW+48(a0) dmtc2 t1, 0x0044 ld t1, OCTEON_CP2_HSH_IVW(a0) dmtc2 t2, 0x0045 ld t2, OCTEON_CP2_HSH_IVW+8(a0) dmtc2 t0, 0x0046 ld t0, OCTEON_CP2_HSH_IVW+16(a0) dmtc2 t1, 0x0048 dmtc2 t2, 0x0049 b done_restore /* unconditional branch */ dmtc2 t0, 0x004A 3: /* this is post-pass1 code */ ld t2, OCTEON_CP2_HSH_DATW(a0) ori v0, v0, 0x9500 /* lowest OCTEON III PrId*/ ld t0, OCTEON_CP2_HSH_DATW+8(a0) ld t1, OCTEON_CP2_HSH_DATW+16(a0) dmtc2 t2, 0x0240 ld t2, OCTEON_CP2_HSH_DATW+24(a0) dmtc2 t0, 0x0241 ld t0, OCTEON_CP2_HSH_DATW+32(a0) dmtc2 t1, 0x0242 ld t1, OCTEON_CP2_HSH_DATW+40(a0) dmtc2 t2, 0x0243 ld t2, OCTEON_CP2_HSH_DATW+48(a0) dmtc2 t0, 0x0244 ld t0, OCTEON_CP2_HSH_DATW+56(a0) dmtc2 t1, 0x0245 ld t1, OCTEON_CP2_HSH_DATW+64(a0) dmtc2 t2, 0x0246 ld t2, OCTEON_CP2_HSH_DATW+72(a0) dmtc2 t0, 0x0247 ld t0, OCTEON_CP2_HSH_DATW+80(a0) dmtc2 t1, 0x0248 ld t1, OCTEON_CP2_HSH_DATW+88(a0) dmtc2 t2, 0x0249 ld t2, OCTEON_CP2_HSH_DATW+96(a0) dmtc2 t0, 0x024A ld t0, OCTEON_CP2_HSH_DATW+104(a0) dmtc2 t1, 0x024B ld t1, OCTEON_CP2_HSH_DATW+112(a0) dmtc2 t2, 0x024C ld t2, OCTEON_CP2_HSH_IVW(a0) dmtc2 t0, 0x024D ld t0, OCTEON_CP2_HSH_IVW+8(a0) dmtc2 t1, 0x024E ld t1, 
OCTEON_CP2_HSH_IVW+16(a0) dmtc2 t2, 0x0250 ld t2, OCTEON_CP2_HSH_IVW+24(a0) dmtc2 t0, 0x0251 ld t0, OCTEON_CP2_HSH_IVW+32(a0) dmtc2 t1, 0x0252 ld t1, OCTEON_CP2_HSH_IVW+40(a0) dmtc2 t2, 0x0253 ld t2, OCTEON_CP2_HSH_IVW+48(a0) dmtc2 t0, 0x0254 ld t0, OCTEON_CP2_HSH_IVW+56(a0) dmtc2 t1, 0x0255 ld t1, OCTEON_CP2_GFM_MULT(a0) dmtc2 t2, 0x0256 ld t2, OCTEON_CP2_GFM_MULT+8(a0) dmtc2 t0, 0x0257 ld t0, OCTEON_CP2_GFM_POLY(a0) dmtc2 t1, 0x0258 ld t1, OCTEON_CP2_GFM_RESULT(a0) dmtc2 t2, 0x0259 ld t2, OCTEON_CP2_GFM_RESULT+8(a0) dmtc2 t0, 0x025E subu v0, t3, v0 /* prid - lowest OCTEON III PrId */ dmtc2 t1, 0x025A bltz v0, done_restore dmtc2 t2, 0x025B /* OCTEON III things*/ ld t0, OCTEON_CP2_SHA3(a0) ld t1, OCTEON_CP2_SHA3+8(a0) dmtc2 t0, 0x0051 dmtc2 t1, 0x0050 done_restore: jr ra nop END(octeon_cop2_restore) .set pop /* * void octeon_mult_save() * sp is assumed to point to a struct pt_regs * * NOTE: This is called in SAVE_TEMP in stackframe.h. It can * safely modify v1,k0, k1,$10-$15, and $24. It will * be overwritten with a processor specific version of the code. */ .p2align 7 .set push .set noreorder LEAF(octeon_mult_save) jr ra nop .space 30 * 4, 0 octeon_mult_save_end: EXPORT(octeon_mult_save_end) END(octeon_mult_save) LEAF(octeon_mult_save2) /* Save the multiplier state OCTEON II and earlier*/ v3mulu k0, $0, $0 v3mulu k1, $0, $0 sd k0, PT_MTP(sp) /* PT_MTP has P0 */ v3mulu k0, $0, $0 sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */ ori k1, $0, 1 v3mulu k1, k1, $0 sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */ v3mulu k0, $0, $0 sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */ v3mulu k1, $0, $0 sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ jr ra sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ octeon_mult_save2_end: EXPORT(octeon_mult_save2_end) END(octeon_mult_save2) LEAF(octeon_mult_save3) /* Save the multiplier state OCTEON III */ v3mulu $10, $0, $0 /* read P0 */ v3mulu $11, $0, $0 /* read P1 */ v3mulu $12, $0, $0 /* read P2 */ sd $10, PT_MTP+(0*8)(sp) /* store P0 */ v3mulu $10, $0, $0 /* read P3 */ sd $11, PT_MTP+(1*8)(sp) /* store P1 */ v3mulu $11, $0, $0 /* read P4 */ sd $12, PT_MTP+(2*8)(sp) /* store P2 */ ori $13, $0, 1 v3mulu $12, $0, $0 /* read P5 */ sd $10, PT_MTP+(3*8)(sp) /* store P3 */ v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */ sd $11, PT_MTP+(4*8)(sp) /* store P4 */ v3mulu $10, $0, $0 /* read MPL1 */ sd $12, PT_MTP+(5*8)(sp) /* store P5 */ v3mulu $11, $0, $0 /* read MPL2 */ sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */ v3mulu $12, $0, $0 /* read MPL3 */ sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */ v3mulu $10, $0, $0 /* read MPL4 */ sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */ v3mulu $11, $0, $0 /* read MPL5 */ sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */ sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */ jr ra sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */ octeon_mult_save3_end: EXPORT(octeon_mult_save3_end) END(octeon_mult_save3) .set pop /* * void octeon_mult_restore() * sp is assumed to point to a struct pt_regs * * NOTE: This is called in RESTORE_TEMP in stackframe.h. 
*/ .p2align 7 .set push .set noreorder LEAF(octeon_mult_restore) jr ra nop .space 30 * 4, 0 octeon_mult_restore_end: EXPORT(octeon_mult_restore_end) END(octeon_mult_restore) LEAF(octeon_mult_restore2) ld v0, PT_MPL(sp) /* MPL0 */ ld v1, PT_MPL+8(sp) /* MPL1 */ ld k0, PT_MPL+16(sp) /* MPL2 */ /* Restore the multiplier state */ ld k1, PT_MTP+16(sp) /* P2 */ mtm0 v0 /* MPL0 */ ld v0, PT_MTP+8(sp) /* P1 */ mtm1 v1 /* MPL1 */ ld v1, PT_MTP(sp) /* P0 */ mtm2 k0 /* MPL2 */ mtp2 k1 /* P2 */ mtp1 v0 /* P1 */ jr ra mtp0 v1 /* P0 */ octeon_mult_restore2_end: EXPORT(octeon_mult_restore2_end) END(octeon_mult_restore2) LEAF(octeon_mult_restore3) ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */ ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */ ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */ ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */ .word 0x718d0008 /* mtm0 $12, $13 restore MPL0 and MPL3 */ ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */ .word 0x714b000c /* mtm1 $10, $11 restore MPL1 and MPL4 */ ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */ ld $10, PT_MTP+(0*8)(sp) /* read P0 */ ld $11, PT_MTP+(3*8)(sp) /* read P3 */ .word 0x718d000d /* mtm2 $12, $13 restore MPL2 and MPL5 */ ld $12, PT_MTP+(1*8)(sp) /* read P1 */ .word 0x714b0009 /* mtp0 $10, $11 restore P0 and P3 */ ld $13, PT_MTP+(4*8)(sp) /* read P4 */ ld $10, PT_MTP+(2*8)(sp) /* read P2 */ ld $11, PT_MTP+(5*8)(sp) /* read P5 */ .word 0x718d000a /* mtp1 $12, $13 restore P1 and P4 */ jr ra .word 0x714b000b /* mtp2 $10, $11 restore P2 and P5 */ octeon_mult_restore3_end: EXPORT(octeon_mult_restore3_end) END(octeon_mult_restore3) .set pop
aixcc-public/challenge-001-exemplar-source
4,403
arch/mips/kernel/entry.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2001 MIPS Technologies, Inc. */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/compiler.h> #include <asm/irqflags.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/isadep.h> #include <asm/thread_info.h> #ifndef CONFIG_PREEMPTION #define resume_kernel restore_all #else #define __ret_from_irq ret_from_exception #endif .text .align 5 #ifndef CONFIG_PREEMPTION FEXPORT(ret_from_exception) local_irq_disable # preempt stop b __ret_from_irq #endif FEXPORT(ret_from_irq) LONG_S s0, TI_REGS($28) FEXPORT(__ret_from_irq) /* * We can be coming here from a syscall done in the kernel space, * e.g. a failed kernel_execve(). */ resume_userspace_check: LONG_L t0, PT_STATUS(sp) # returning to kernel mode? andi t0, t0, KU_USER beqz t0, resume_kernel resume_userspace: local_irq_disable # make sure we dont miss an # interrupt setting need_resched # between sampling and return LONG_L a2, TI_FLAGS($28) # current->work andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) bnez t0, work_pending j restore_all #ifdef CONFIG_PREEMPTION resume_kernel: local_irq_disable lw t0, TI_PRE_COUNT($28) bnez t0, restore_all LONG_L t0, TI_FLAGS($28) andi t1, t0, _TIF_NEED_RESCHED beqz t1, restore_all LONG_L t0, PT_STATUS(sp) # Interrupts off? andi t0, 1 beqz t0, restore_all PTR_LA ra, restore_all j preempt_schedule_irq #endif FEXPORT(ret_from_kernel_thread) jal schedule_tail # a0 = struct task_struct *prev move a0, s1 jal s0 j syscall_exit FEXPORT(ret_from_fork) jal schedule_tail # a0 = struct task_struct *prev FEXPORT(syscall_exit) #ifdef CONFIG_DEBUG_RSEQ move a0, sp jal rseq_syscall #endif local_irq_disable # make sure need_resched and # signals dont change between # sampling and return LONG_L a2, TI_FLAGS($28) # current->work li t0, _TIF_ALLWORK_MASK and t0, a2, t0 bnez t0, syscall_exit_work restore_all: # restore full frame .set noat RESTORE_TEMP RESTORE_AT RESTORE_STATIC restore_partial: # restore partial frame #ifdef CONFIG_TRACE_IRQFLAGS SAVE_STATIC SAVE_AT SAVE_TEMP LONG_L v0, PT_STATUS(sp) #if defined(CONFIG_CPU_R3000) and v0, ST0_IEP #else and v0, ST0_IE #endif beqz v0, 1f jal trace_hardirqs_on b 2f 1: jal trace_hardirqs_off 2: RESTORE_TEMP RESTORE_AT RESTORE_STATIC #endif RESTORE_SOME RESTORE_SP_AND_RET .set at work_pending: andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS beqz t0, work_notifysig work_resched: TRACE_IRQS_OFF jal schedule local_irq_disable # make sure need_resched and # signals dont change between # sampling and return LONG_L a2, TI_FLAGS($28) andi t0, a2, _TIF_WORK_MASK # is there any work to be done # other than syscall tracing? beqz t0, restore_all andi t0, a2, _TIF_NEED_RESCHED bnez t0, work_resched work_notifysig: # deal with pending signals and # notify-resume requests move a0, sp li a1, 0 jal do_notify_resume # a2 already loaded j resume_userspace_check FEXPORT(syscall_exit_partial) #ifdef CONFIG_DEBUG_RSEQ move a0, sp jal rseq_syscall #endif local_irq_disable # make sure need_resched doesn't # change between and return LONG_L a2, TI_FLAGS($28) # current->work li t0, _TIF_ALLWORK_MASK and t0, a2 beqz t0, restore_partial SAVE_STATIC syscall_exit_work: LONG_L t0, PT_STATUS(sp) # returning to kernel mode? 
andi t0, t0, KU_USER beqz t0, resume_kernel li t0, _TIF_WORK_SYSCALL_EXIT and t0, a2 # a2 is preloaded with TI_FLAGS beqz t0, work_pending # trace bit set? local_irq_enable # could let syscall_trace_leave() # call schedule() instead TRACE_IRQS_ON move a0, sp jal syscall_trace_leave b resume_userspace #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \ defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_MIPS_MT) /* * MIPS32R2 Instruction Hazard Barrier - must be called * * For C code use the inline version named instruction_hazard(). */ LEAF(mips_ihb) .set MIPS_ISA_LEVEL_RAW jr.hb ra nop END(mips_ihb) #endif /* CONFIG_CPU_MIPSR2 - CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */